Commit

update
quanshengwu committed May 7, 2024
1 parent 5ade846 commit 1b2cd91
Showing 6 changed files with 86 additions and 28 deletions.
17 changes: 5 additions & 12 deletions src/Makefile.intel-mpi
@@ -1,5 +1,5 @@
OBJ = module.o sparse.o wt_aux.o math_lib.o mgmres.o symmetry.o \
c_fortran_zgssv.o readHmnR.o inverse.o proteus.o \
OBJ = module.o sparse.o wt_aux.o math_lib.o symmetry.o \
readHmnR.o inverse.o proteus.o \
eigen.o ham_qlayer2qlayer.o psi.o unfolding.o rand.o \
ham_slab.o ham_bulk.o ek_slab.o ek_bulk_polar.o ek_bulk.o \
readinput.o fermisurface.o surfgreen.o surfstat.o \
@@ -13,11 +13,9 @@ OBJ = module.o sparse.o wt_aux.o math_lib.o mgmres.o symmetry.o \
main.o

# compiler
F90 = mpif90 -fpp -DMPI -fpe3 -O3 # -DINTELMKL -DSUPERLU # -check all -traceback -g
#F90 = ifort -fpp -DINTELMKL -fpe3 -check all -traceback -g
CC = mpicc -fpp -DMPI -fpe3 -O3 # -DINTELMKL -DSUPERLU
F90 = mpif90 -fpp -DMPI -fpe3 -O3 -DARPACK -DINTELMKL # -check all -traceback -g

INCLUDE = #-I${MKLROOT}/include -I/Users/user/quan/work/workplace/github-repositories/superlu/SRC
INCLUDE = -I${MKLROOT}/include
WFLAG = -nogen-interface
OFLAG = -O3 -static-intel
FFLAG = $(OFLAG) $(WFLAG)
@@ -26,9 +24,6 @@ LFLAG = $(OFLAG)
# ARPACK LIBRARY
ARPACK=/Users/user/quan/work/workplace/ARPACK/libarpack_MAC.a

# need to specify if -DSUPERLU is provided in the F90 flags
SUPERLULIB=#/usr/local/lib/libsuperlu.a

# blas and lapack libraries
# static linking
#LIBS = -Wl,--start-group ${MKLROOT}/lib/intel64/libmkl_intel_lp64.a \
@@ -37,7 +32,7 @@ SUPERLULIB=#/usr/local/lib/libsuperlu.a
${ARPACK}

# dynamic linking
LIBS = ${ARPACK} -L/${MKLROOT}/lib/intel64 -lmkl_core -lmkl_sequential -lmkl_intel_lp64 -lpthread ${SUPERLULIB}
LIBS = ${ARPACK} -L/${MKLROOT}/lib/intel64 -lmkl_core -lmkl_sequential -lmkl_intel_lp64 -lpthread

main : $(OBJ)
$(F90) $(LFLAG) $(OBJ) -o wt.x $(LIBS)
@@ -47,8 +42,6 @@ main : $(OBJ)

.f90.o :
$(F90) $(FFLAG) $(INCLUDE) -c $*.f90
.c.o:
$(CC) $(CFLAGS) -I$(INCLUDE) -c $< $(VERBOSE)

clean :
rm -f *.o *.mod *~ wt.x
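The -DMPI, -DARPACK and -DINTELMKL options passed to F90 above are fpp preprocessor macros rather than optimization flags; they select code paths at compile time through #if defined blocks, as in the sparse.f90 hunk further down. A minimal, hypothetical sketch of such a guard (not WannierTools code), assuming the source is preprocessed with -fpp:

program guard_demo
   implicit none
#if defined (ARPACK)
   ! compiled when -DARPACK is present in the F90 line
   print *, 'built with -DARPACK: ARPACK-based sparse diagonalization is available'
#else
   ! compiled otherwise, matching the STOP messages added in sparse.f90
   print *, 'built without -DARPACK: large sparse matrices cannot be diagonalized'
#endif
end program guard_demo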
55 changes: 55 additions & 0 deletions src/Makefile.intel-mpi-for-developer
@@ -0,0 +1,55 @@
OBJ = module.o sparse.o wt_aux.o math_lib.o symmetry.o \
c_fortran_zgssv.o readHmnR.o inverse.o proteus.o \
eigen.o ham_qlayer2qlayer.o psi.o unfolding.o rand.o \
ham_slab.o ham_bulk.o ek_slab.o ek_bulk_polar.o ek_bulk.o \
readinput.o fermisurface.o surfgreen.o surfstat.o \
mat_mul.o ham_ribbon.o ek_ribbon.o \
fermiarc.o berrycurvature.o \
wanniercenter.o dos.o orbital_momenta.o \
landau_level_sparse.o landau_level.o lanczos_sparse.o \
berry.o wanniercenter_adaptive.o \
effective_mass.o findnodes.o \
sigma_OHE.o sigma.o Boltz_transport_anomalous.o \
main.o

# compiler
F90 = mpif90 -fpp -DMPI -fpe3 -O3 -DARPACK #-DINTELMKL -DSUPERLU # -check all -traceback -g
#F90 = ifort -fpp -DINTELMKL -fpe3 -check all -traceback -g
CC = mpicc -fpp -O3 #-DINTELMKL -DSUPERLU

INCLUDE = -I${MKLROOT}/include -I/Users/user/quan/work/workplace/github-repositories/superlu/SRC
WFLAG = -nogen-interface
OFLAG = -O3 -static-intel
FFLAG = $(OFLAG) $(WFLAG)
LFLAG = $(OFLAG)

# ARPACK LIBRARY
ARPACK=/Users/user/quan/work/workplace/ARPACK/libarpack_MAC.a

# need to specify if -DSUPERLU is provided in the F90 flags
SUPERLULIB=/usr/local/lib/libsuperlu.a

# blas and lapack libraries
# static linking
#LIBS = -Wl,--start-group ${MKLROOT}/lib/intel64/libmkl_intel_lp64.a \
${MKLROOT}/lib/intel64/libmkl_sequential.a \
${MKLROOT}/lib/intel64/libmkl_core.a -Wl,--end-group -lpthread -lm -ldl \
${ARPACK}

# dynamic linking
LIBS = ${ARPACK} -L/${MKLROOT}/lib/intel64 -lmkl_core -lmkl_sequential -lmkl_intel_lp64 -lpthread ${SUPERLULIB}

main : $(OBJ)
$(F90) $(LFLAG) $(OBJ) -o wt.x $(LIBS)
cp -f wt.x ../bin

.SUFFIXES: .o .f90 .c

.f90.o :
$(F90) $(FFLAG) $(INCLUDE) -c $*.f90
.c.o:
$(CC) $(CFLAGS) -I$(INCLUDE) -c $< $(VERBOSE)

clean :
rm -f *.o *.mod *~ wt.x

8 changes: 7 additions & 1 deletion src/module.f90
@@ -587,13 +587,19 @@ module para
!> specify the atom index that located on the top surface that you want to study
integer :: topsurface_atom_index
real(dp) :: shift_to_topsurface_cart(3)

!> a tag to control how we call ARPACK to diagonalize a sparse matrix
!> value: zndrv1 using A*x
!> zndrv2 using inv(A)
!> default "zndrv1"
character(20) :: arpack_solver

!> namelist parameters
namelist /PARAMETERS/ Eta_Arc,EF_broadening, OmegaNum, OmegaNum_unfold, OmegaMin, OmegaMax, &
E_arc, Nk1, Nk2, Nk3, NP, Gap_threshold, Tmin, Tmax, NumT, &
NBTau, BTauNum, BTauMax, Rcut, Magp, Magq, Magp_min, Magp_max, Nslice_BTau_Max, &
wcc_neighbour_tol, wcc_calc_tol, Beta,NumLCZVecs, &
Relaxation_Time_Tau, symprec, &
Relaxation_Time_Tau, symprec, arpack_solver, &
NumRandomConfs, NumSelectedEigenVals, projection_weight_mode, topsurface_atom_index

real(Dp) :: E_fermi ! Fermi energy, search E-fermi in OUTCAR for VASP, set to zero for Wien2k
1 change: 1 addition & 0 deletions src/readinput.f90
@@ -568,6 +568,7 @@ subroutine readinput
Beta= 100
Relaxation_Time_Tau= 1d0 ! in ps
topsurface_atom_index= 0
arpack_solver= 'zndrv1'


!> by default, we only project on atoms for a given wave function
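Taken together, the module.f90 and readinput.f90 changes above add a new user-facing input tag, arpack_solver, defaulting to 'zndrv1'. A minimal sketch of how it might be set in the PARAMETERS namelist of the input file (wt.in is assumed; the other values are purely illustrative):

&PARAMETERS
Nk1 = 61                     ! illustrative value only
NumSelectedEigenVals = 100   ! illustrative value only
arpack_solver = 'zndrv2'     ! shift-invert driver using inv(A); 'zndrv1' (A*x) is the default
/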
4 changes: 2 additions & 2 deletions src/sigma_OHE.f90
@@ -1814,7 +1814,7 @@ subroutine evolve_k_ohe
open(unit=myfileindex(ib), file=evolvefilename)
write(myfileindex(ib), '(a10, i5, a16, f16.8)')'# evolve k ', bands_fermi_level(ib), 'energy level', Ek(ib)
write(myfileindex(ib), '("#", a13, 24a16)')'BTau (T.ps)', &
'OmegaTau', 'kx', 'ky', 'kz', 'vx', 'vy', 'vz', 'k1', 'k2','k3', 'Energy(ev)', "vx'", "vy'", "vz'"
'kx', 'ky', 'kz', 'vx', 'vy', 'vz', 'k1', 'k2','k3', 'Energy(ev)', "vx'", "vy'", "vz'"
endif
enddo
#if defined (MPI)
@@ -1898,7 +1898,7 @@ subroutine evolve_k_ohe
call direct_cart_rec(kout_all(:, it, ik), k)
call project_k3_to_kplane_defined_by_direction(v_t, Bdirection, v_t2)
write(myfileindex(ib), '(100f16.8)')Btau*Magneticfluxdensity_atomic/Relaxation_Time_Tau, &
Btau*Magneticfluxdensity_atomic/Relaxation_Time_Tau*0.175874356d0, k, v_t, kout_all(:, it, ik), E_iband, v_t2
k, v_t, kout_all(:, it, ik), E_iband, v_t2
enddo
endif
endif
29 changes: 16 additions & 13 deletions src/sparse.f90
@@ -2302,12 +2302,19 @@ subroutine arpack_sparse_coo_eigs(ndims,nnzmax,nnz,acoo,jcoo,icoo,neval,nvecs,de
zeigv= 0d0

#if defined (INTELMKL)
!> zndrv2 needs a sparse solver to solve (A-sigma*I)*x=B with given A, sigma, and B
!> get eigenvalues of a sparse matrix by calling arpack subroutine
!> acoo, jcoo, icoo would be converted into A-sigma*I, then converted into CSR format
call zmat_arpack_zndrv2(ndims, nnzmax, nnz, acoo, jcoo, icoo, sigma, neval, nvecs, deval, zeigv, ritzvec)
!> usually zndrv1 is about 10 times faster than zndrv2
if (arpack_solver=='zndrv2') then
call zmat_arpack_zndrv2(ndims, nnzmax, nnz, acoo, jcoo, icoo, sigma, neval, nvecs, deval, zeigv, ritzvec)
else
call zmat_arpack_zndrv1(ndims, nnzmax, nnz, acoo, jcoo, icoo, sigma, neval, nvecs, deval, zeigv, ritzvec)
endif
#else
!> here acoo, icoo, jcoo are stored in COO format
!> use matrix vector multiplication
!> zndrv1 needs a matrix vector multiplication operator A*x
call zmat_arpack_zndrv1(ndims, nnzmax, nnz, acoo, jcoo, icoo, sigma, neval, nvecs, deval, zeigv, ritzvec)
#endif

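For context on the comments above: zndrv1 only needs the action of A on a vector, which for the COO-stored matrix (acoo, icoo, jcoo) is a single loop over the nonzero entries, while zndrv2 must solve the shifted system (A-sigma*I)*x=B at every iteration, which is why it is typically much slower but converges to eigenvalues near sigma. A minimal, self-contained sketch of such a COO matrix-vector product (the routine name and interface are assumptions for illustration, not the actual WannierTools code):

subroutine coo_matvec(ndims, nnz, acoo, icoo, jcoo, x, y)
   !> y = A*x for a sparse matrix stored in COO format:
   !> acoo(k) is the entry at row icoo(k), column jcoo(k)
   implicit none
   integer, intent(in)     :: ndims, nnz
   integer, intent(in)     :: icoo(nnz), jcoo(nnz)
   complex(8), intent(in)  :: acoo(nnz), x(ndims)
   complex(8), intent(out) :: y(ndims)
   integer :: k

   y = (0d0, 0d0)
   do k = 1, nnz
      y(icoo(k)) = y(icoo(k)) + acoo(k)*x(jcoo(k))
   enddo
end subroutine coo_matvec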
@@ -2619,7 +2626,7 @@ subroutine zmat_arpack_zndrv4(ndims, nnzmax, nnz, acsr, jcsr, icsr, &
call znaupd ( ido, bmat, n, which, nev, tol, resid, ncv, &
zeigv, ldv, iparam, ipntr, workd, workl, lworkl, rwork, info )
#else
STOP "ERROR : Please install WannierTools with ARPACK"
STOP "ERROR : Please install WannierTools with ARPACK since you are diagonalizing a large sparse matrix"
#endif
call now(time_end)
time_cost_znaupd= time_cost_znaupd+ time_end- time_start
@@ -2751,7 +2758,7 @@ subroutine zmat_arpack_zndrv4(ndims, nnzmax, nnz, acsr, jcsr, icsr, &
zeigv, ldv, iparam, ipntr, workd, workl, lworkl, &
rwork, ierr)
#else
STOP "ERROR : Please install WannierTools with ARPACK"
STOP "ERROR : Please install WannierTools with ARPACK since you are diagonalizing a large sparse matrix"
#endif
!
! %----------------------------------------------%
@@ -3103,7 +3110,7 @@ subroutine zmat_arpack_zndrv3(ndims, nnzmax, nnz, acsr, jcsr, icsr, snnzmax, snn
call znaupd ( ido, bmat, n, which, nev, tol, resid, ncv, &
zeigv, ldv, iparam, ipntr, workd, workl, lworkl, rwork, info )
#else
STOP "ERROR : Please install WannierTools with ARPACK"
STOP "ERROR : Please install WannierTools with ARPACK since you are diagonalizing a large sparse matrix"
#endif

iter = iter + 1
@@ -3197,7 +3204,7 @@ subroutine zmat_arpack_zndrv3(ndims, nnzmax, nnz, acsr, jcsr, icsr, snnzmax, snn
zeigv, ldv, iparam, ipntr, workd, workl, lworkl, &
rwork, ierr)
#else
STOP "ERROR : Please install WannierTools with ARPACK"
STOP "ERROR : Please install WannierTools with ARPACK since you are diagonalizing a large sparse matrix"
#endif
!
! %----------------------------------------------%
@@ -3528,7 +3535,7 @@ subroutine zmat_arpack_zndrv1(ndims, nnzmax, nnz, acsr, jcsr, icsr, sigma, neval
call znaupd ( ido, bmat, n, which, nev, tol, resid, ncv, &
zeigv, ldv, iparam, ipntr, workd, workl, lworkl, rwork, info )
#else
STOP "ERROR : Please install WannierTools with ARPACK"
STOP "ERROR : Please install WannierTools with ARPACK since you are diagonalizing a large sparse matrix"
#endif

iter = iter + 1
@@ -3662,7 +3669,7 @@ subroutine zmat_arpack_zndrv1(ndims, nnzmax, nnz, acsr, jcsr, icsr, sigma, neval
!
if ( info .eq. 1) then
if (cpuid==0) write(stdout, *) ' '
if (cpuid==0) write(stdout, *) ' Maximum number of iterations reached.'
if (cpuid==0) write(stdout, *) ' Maximum number of iterations reached. Try increasing NCV'
if (cpuid==0) write(stdout, *) ' '
else if ( info .eq. 3) then
if (cpuid==0) write(stdout, *) ' '
@@ -3951,7 +3958,7 @@ subroutine zmat_arpack_zndrv2(ndims, nnzmax, nnz, acsr, jcsr, icsr, sigma,neval,
call znaupd ( ido, bmat, n, which, nev, tol, resid, ncv, &
zeigv, ldv, iparam, ipntr, workd, workl, lworkl, rwork,info )
#else
STOP "ERROR : Please install WannierTools with ARPACK"
STOP "ERROR : Please install WannierTools with ARPACK since you are diagonalizing a large sparse matrix"
#endif

iter = iter + 1
@@ -3969,10 +3976,6 @@ subroutine zmat_arpack_zndrv2(ndims, nnzmax, nnz, acsr, jcsr, icsr, sigma,neval,
! | the result to workd(ipntr(2)). |
! %-------------------------------------------%
!
! call zcopy( n, workd(ipntr(1)),1, workd(ipntr(2)), 1)
!
! call zgttrs('N', n, 1, dl, dd, du, du2, ipiv, workd(ipntr(2)), n, ierr)
!call zmat_mkldss_zgesv(ndims, nnz, acsr, jcsr, icsr, workd(ipntr(1)), workd(ipntr(2)))
call sparse_solver(ndims, nnz, acsr, icsr, jcsr, workd(ipntr(1)), workd(ipntr(2)))
!
! %-----------------------------------------%
@@ -4019,7 +4022,7 @@ subroutine zmat_arpack_zndrv2(ndims, nnzmax, nnz, acsr, jcsr, icsr, sigma,neval,
zeigv, ldv, iparam, ipntr, workd, workl, lworkl, &
rwork, ierr)
#else
STOP "ERROR : Please install WannierTools with ARPACK"
STOP "ERROR : Please install WannierTools with ARPACK since you are diagonalizing a large sparse matrix"
#endif
!
! %----------------------------------------------%
