22#if defined(SPM_WITH_MPI)
56#if !defined(PRECISION_p)
73 for ( j = 0; j < spm->
n; j++, oldrow+=size )
75 jg = l2g_sorted[j] - baseval;
77 k = newcol[jl] - baseval;
78 size = newcol[jl+1] - newcol[jl];
79 memcpy( newrow + k, oldrow, size *
sizeof(
spm_int_t) );
81 assert( (oldrow - oldrowptr) == spm->
nnz );
83#if !defined(PRECISION_p)
93 for ( j = 0; j < spm->
n; j++ )
95 jg = l2g_sorted[j] - baseval;
97 size = dofshift[jl+1] - dofshift[jl];
105 dofj = dofs[jg+1] - dofs[jg];
108 for( k = newcol[jl]; k < newcol[jl+1]; k++, oldrow++ )
110 ig = *oldrow - baseval;
111 dofi += dofs[ig+1] - dofs[ig];
115 assert( (
size_t)(dofi * dofj) == size );
119 memcpy( newval + dofshift[jl], oldval, size *
sizeof(
int) );
122 assert( (oldrow - oldrowptr) == spm->
nnz );
158#if !defined(PRECISION_p)
163 dof2 = spm->
dof * spm->
dof;
178 for ( j = 0; j < spm->
n; j++, oldrow+=size )
180 jg = l2g_sorted[j] - baseval;
182 k = newcol[jl] - baseval;
183 size = newcol[jl+1] - newcol[jl];
184 memcpy( newrow + k, oldrow, size *
sizeof(
spm_int_t) );
186#if !defined(PRECISION_p)
187 memcpy( newval + k * dof2, oldval,
188 size * dof2 *
sizeof(
int) );
189 oldval += size * dof2;
192 assert( (oldrow - oldrowptr) == spm->
nnz );
228 fprintf( stderr,
"spmConvert: Conversion of non column distributed matrices to CSC is not yet implemented\n");
239 spmIntSort1Asc1( l2g_sorted, spm->
n );
244 for ( k=0; k<spm->
nnz; k++, oldcol++ )
246 jg = *oldcol - baseval;
259 for (j=0; j<(spm->
n+1); j++, newcol++)
265 assert( (total-baseval) == spm->
nnz );
272 for ( j=0; j<(spm->
n-1); j++, loc2glob++ )
274 if ( loc2glob[0] > loc2glob[1] ) {
294 int *oldval = spm->
values;
298#if !defined(PRECISION_p)
307 if ( spm->
dof > 0 ) {
308 p_spm_dijv2csc_cdof( spm, oldrow, oldval, l2g_sorted );
311 p_spm_dijv2csc_vdof( spm, oldrow, oldval, l2g_sorted );
357#if defined(SPM_WITH_MPI)
359 return p_spm_dijv2csc( spm );
368 for (k=0; k<spm->
nnz; k++, oldcol++)
370 j = *oldcol - baseval;
379 for (j=0; j<(spm->
n+1); j++, newcol++)
385 assert( (total - baseval) == spm->
nnz );
430#if defined(PRECISION_z) || defined(PRECISION_c)
442 for ( jj = 0; jj < dofj; jj++ ) {
443 for ( ii = 0; ii < dofi; ii++, valptr++ ) {
444 if ( ( col + jj ) == ( row + ii ) ) {
447 *valptr = ( *valptr );
452 for ( ii = 0; ii < dofi; ii++ ) {
453 for ( jj = 0; jj < dofj; jj++, valptr++ ) {
454 if ( ( col + jj ) == ( row + ii ) ) {
457 *valptr = ( *valptr );
489 int *valptr = spm->
values;
504 for( i=0; i<spm->
n; i++, rowptr++, loc2glob++ )
506 ig = spm->
replicated ? i : (*loc2glob) - baseval;
507 if ( spm->
dof > 0 ) {
512 dofi = dofs[ig+1] - dofs[ig];
513 row = dofs[ig] - baseval;
516 for( k=rowptr[0]; k<rowptr[1]; k++, colptr++ )
518 jg = (*colptr - baseval);
519 if ( spm->
dof > 0 ) {
524 dofj = dofs[jg+1] - dofs[jg];
525 col = dofs[jg] - baseval;
528 p_spmConvert_conj_elt( spm->
layout,
529 row, dofi, col, dofj, valptr );
530 valptr += dofi * dofj;
570#if !defined(PRECISION_p)
571 int *val_csc, *valtmp;
572 int *valptr = (
int *)(spm->
values);
576#if defined(SPM_WITH_MPI)
587 row_csc = malloc( nnz *
sizeof(
spm_int_t) );
588 col_csc = calloc( spm->
n+1,
sizeof(
spm_int_t) );
593#if !defined(PRECISION_p)
594 val_csc = malloc( spm->
nnzexp *
sizeof(
int) );
600 for (j=0; j<nnz; j++) {
601 col = spm->
colptr[j] - baseval;
602 assert( col < spm->n );
608 for (j=0; j<spm->
n; j++){
609 col_csc[j+1] += col_csc[j];
612 assert( col_csc[spm->
gN] == nnz );
614 for (row=0; row<spm->
n; row++) {
618 for ( k=fcol; k<lcol; k++ ) {
619 col = spm->
colptr[k] - baseval;
621 row_csc[j] = row + baseval;
623#if !defined(PRECISION_p)
624 val_csc[j] = valptr[k];
635 col_csc[0] = baseval;
636 for ( j=0; j<spm->
n; j++ ) {
638 col_csc[j+1] = tmp + baseval;
648#if !defined(PRECISION_p)
649 if ( spm->
dof != 1 ) {
658 for ( col = 0; col < spm->
n; col++, coltmp++ )
660 dofj = (dof > 0) ? dof : dofs[col+1] - dofs[col];
661 for ( k = coltmp[0]; k < coltmp[1]; k++, rowtmp++ )
663 row = *rowtmp - baseval;
664 dofi = (dof > 0) ? dof : dofs[row+1] - dofs[row];
669 for ( j = spm->
rowptr[row]; j < spm->
rowptr[row + 1]; j++, colcsr++ )
671 if( col == (*colcsr - baseval) ){
675 idx = validx[colcsr - spm->
colptr];
677 memcpy( valtmp, valptr + idx, dof2 *
sizeof(
int ) );
682 assert( (valtmp - val_csc) == spm->
nnzexp );
691#if !defined(PRECISION_p)
731#if defined(PRECISION_z) || defined(PRECISION_c)
733 return p_spmConvertCSR2CSC_her( spm );
enum spm_layout_e spm_layout_t
Direction of the matrix storage.
#define SpmDistByColumn
Distribution of the matrix storage.
spm_int_t * spm_get_value_idx_by_elt(const spmatrix_t *spm)
Create an array that represents the shift for each sub-element of the original multidof value array.
void p_spmSort(spmatrix_t *spm)
This routine sorts the spm matrix.
spm_int_t * spm_get_value_idx_by_col(const spmatrix_t *spm)
Create an array that represents the shift for each column of the original multidof value array.
int p_spmConvertCSR2CSC(spmatrix_t *spm)
Convert a matrix in CSR format to a matrix in CSC format.
static int p_spmConvertCSR2CSC_gen(spmatrix_t *spm)
Convert a general matrix in CSR format to a matrix in CSC format.
int p_spmConvertIJV2CSC(spmatrix_t *spm)
Convert a matrix in IJV format to a matrix in CSC format.
static int p_spmConvertCSR2CSC_sym(spmatrix_t *spm)
Convert a symmetric matrix in CSR format to a matrix in CSC format.
void spmExit(spmatrix_t *spm)
Cleanup the spm structure but do not free the spm pointer.
int spm_int_t
The main integer datatype used in spm arrays.
The sparse matrix data structure.
spm_int_t * spm_getandset_glob2loc(spmatrix_t *spm)
Computes the glob2loc array if needed, and returns it.
int spm_get_distribution(const spmatrix_t *spm)
Search the distribution pattern used in the spm structure.