Commit fcdf59a7 authored by Zimmer, Christopher

Initial Commit

cmake_minimum_required(VERSION 2.6)
project(libiointercept)
option(DEBUG "Use debugging symbols" OFF)
option(TITAN "Build for titan emulation" OFF)
set(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS)
if(DEBUG)
  add_definitions(-DDEBUG)
  set(CMAKE_C_FLAGS "-g -Werror")
else()
  set(CMAKE_C_FLAGS "-g -O3 -Werror")
endif()
if(TITAN)
  add_definitions(-DTITAN)
endif()
add_subdirectory(src)
add_subdirectory(examples)
file(COPY test DESTINATION .)
The base execution model is BBProxy based.
If any BBProxy calls fail or any transfer handles fail
- Spectral will fall back to a synchronous copy model, using a pthread to copy the data out.
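A minimal sketch of what that synchronous fallback could look like, assuming a hypothetical fallback_copy() entry point with caller-supplied source and destination paths; none of these names are taken from the Spectral source.

/* Hedged sketch of the pthread-based synchronous copy fallback.
 * All names here (copy_job, copy_worker, fallback_copy) are illustrative. */
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct copy_job { char src[4096]; char dst[4096]; };

static void *copy_worker(void *arg)
{
    struct copy_job *job = arg;
    char buf[1 << 16];                               /* 64 KiB staging buffer */
    int in  = open(job->src, O_RDONLY);
    int out = open(job->dst, O_WRONLY | O_CREAT | O_TRUNC, 0644);
    ssize_t n;
    if (in >= 0 && out >= 0)
        while ((n = read(in, buf, sizeof(buf))) > 0)
            write(out, buf, (size_t)n);              /* error handling elided */
    if (in  >= 0) close(in);
    if (out >= 0) close(out);
    free(job);
    return NULL;
}

/* Called when a BBProxy call or transfer handle has failed. */
static int fallback_copy(const char *src, const char *dst)
{
    pthread_t tid;
    struct copy_job *job = malloc(sizeof(*job));
    if (!job) return -1;
    strncpy(job->src, src, sizeof(job->src) - 1);
    strncpy(job->dst, dst, sizeof(job->dst) - 1);
    job->src[sizeof(job->src) - 1] = '\0';
    job->dst[sizeof(job->dst) - 1] = '\0';
    if (pthread_create(&tid, NULL, copy_worker, job) != 0) {
        free(job);
        return -1;
    }
    return pthread_join(tid, NULL);                  /* synchronous: wait for the copy */
}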
Summit supports the ability to accelerate application output through the use of
the node-local burst buffer. A defining feature of the burst buffer is its
ability to drain data to the capacity store in the background without impacting
on-node performance. Using this feature requires the use of the IBM BBAPI
within applications. Unfortunately, this API is IBM specific and non-portable.
Spectral is a library that seeks to overcome this portability limitation by
enabling transparent use of the IBM BBAPI through library intercept techniques.
Spectral works out of the box against traditional file-per-process output
strategies. During an application's output phase, traditionally defined as open,
write, close, barrier, and back to simulation, Spectral intercepts each
process's close call. During this intercept, Spectral checks the directory of
the written file descriptor and verifies that it matches the user-specified
burst buffer directory. When this match is verified, Spectral automatically
schedules the transfer of the file with the IBM Burst Buffer server software.
Once the transfer is scheduled, Spectral returns control to the application to
continue the simulation. The data is drained off of the burst buffer using
NVMe-over-Fabrics offloading, imparting virtually no utilization on the
compute node.
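As a rough illustration of the intercept path described above (a sketch, not the Spectral implementation): a dynamically preloaded wrapper can recover the path behind a file descriptor through /proc/self/fd, compare it against the user-specified burst buffer directory, and only then hand the file off for transfer. The PERSIST_DIR environment variable and the schedule_bb_transfer() call are placeholders invented for this example.

/* Illustrative close() intercept for the dynamic (LD_PRELOAD / dlsym) build.
 * PERSIST_DIR and schedule_bb_transfer() are placeholders, not Spectral APIs. */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

extern int schedule_bb_transfer(const char *path);    /* placeholder hand-off */

int close(int fd)
{
    static int (*real_close)(int) = NULL;
    if (!real_close)
        real_close = (int (*)(int))dlsym(RTLD_NEXT, "close");

    /* Recover the pathname behind the descriptor before it goes away. */
    char link[64], path[PATH_MAX + 1];
    snprintf(link, sizeof(link), "/proc/self/fd/%d", fd);
    ssize_t len = readlink(link, path, PATH_MAX);

    const char *bbdir = getenv("PERSIST_DIR");         /* placeholder variable name */
    if (len > 0 && bbdir) {
        path[len] = '\0';
        /* Only files written under the burst buffer directory are handed off. */
        if (strncmp(path, bbdir, strlen(bbdir)) == 0)
            schedule_bb_transfer(path);
    }
    return real_close(fd);
}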
Essentially, Spectral enables the use of Summit's Burst Buffer for many
applications with only small modifications to batch scripts and configuration
files.
Implement function-pointer calls for BB targets that can be reconfigured
Determine a better way to create a tag that will reduce overlap
Make sure return codes are all consistent
Develop a state machine for the fallback mechanisms (one possible shape is sketched below)
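One possible shape for the fallback state machine noted in the last item, sketched only to make the TODO concrete; the state names and transition rules are invented here, not taken from the code.

/* Sketch only: possible states for the BBAPI-to-synchronous-copy fallback. */
enum xfer_state {
    XFER_BBAPI,          /* normal path: hand the file to the burst buffer server */
    XFER_FALLBACK_COPY,  /* a BBProxy call or handle failed: pthread copy instead */
    XFER_DISABLED        /* repeated failures: stop intercepting, pass through    */
};

static enum xfer_state next_state(enum xfer_state cur, int rc)
{
    switch (cur) {
    case XFER_BBAPI:
        return (rc == 0) ? XFER_BBAPI : XFER_FALLBACK_COPY;
    case XFER_FALLBACK_COPY:
        return (rc == 0) ? XFER_FALLBACK_COPY : XFER_DISABLED;
    default:
        return XFER_DISABLED;
    }
}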
find_package(MPI)
set(CMAKE_C_COMPILER "mpicc")
if(DEBUG)
  add_definitions(-DDEBUG)
  set(CMAKE_C_FLAGS "-g -Werror -std=c99")
else()
  set(CMAKE_C_FLAGS "-g -O2 -Werror -std=c99")
endif()
if(TITAN)
  add_definitions(-DTITAN)
endif()
add_executable(test_dynamic test.c)
add_executable(test_static test.c)
add_executable(test_mpi_static test_mpi.c)
if(TITAN)
  include_directories(${CMAKE_SOURCE_DIR}/libemulatebbapi)
endif()
target_link_libraries(test_static spectralstatic)
target_link_libraries(test_mpi_static spectralstatic)
if(TITAN)
  target_link_libraries(test_static emulate_bbapi)
  target_link_libraries(test_mpi_static emulate_bbapi)
  target_link_libraries(test_dynamic emulate_bbapi_dynamic)
  set_target_properties(test_dynamic PROPERTIES LINK_FLAGS "-dynamic")
  set_target_properties(test_mpi_static PROPERTIES LINK_FLAGS "-Wl,--wrap,close -Wl,--wrap,fclose")
  set_target_properties(test_static PROPERTIES LINK_FLAGS "-Wl,--wrap,close -Wl,--wrap,fclose")
else()
  set_target_properties(test_static PROPERTIES LINK_FLAGS "-Wl,--wrap,close -Wl,--wrap,fclose -Wl,-rpath,/opt/ibm/bb/lib -L/opt/ibm/bb/lib -lbbAPI")
  set_target_properties(test_dynamic PROPERTIES LINK_FLAGS "-dynamic -ldl -Wl,-rpath,/opt/ibm/bb/lib -L/opt/ibm/bb/lib -lbbAPI")
  set_target_properties(test_mpi_static PROPERTIES LINK_FLAGS "-Wl,--wrap,close -Wl,--wrap,fclose -Wl,-rpath,/opt/ibm/bb/lib -L/opt/ibm/bb/lib -lbbAPI")
endif()
file(COPY gtc DESTINATION .)
file(COPY setupenv DESTINATION .)
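For the statically linked tests, the -Wl,--wrap,close flags use the GNU linker's symbol wrapping: every reference to close is redirected to __wrap_close, while the original libc symbol stays reachable as __real_close. A minimal sketch of such a wrapper pair follows; the logging is illustrative only, and the real library would do the burst buffer checks at that point.

/* Minimal --wrap example: link with -Wl,--wrap,close. */
#include <stdio.h>

extern int __real_close(int fd);

int __wrap_close(int fd)
{
    /* Spectral would check the descriptor's directory and possibly schedule
       a burst buffer transfer here; the message below is illustrative only. */
    fprintf(stderr, "intercepted close(%d)\n", fd);
    return __real_close(fd);
}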
subroutine chargee
use global_parameters
use particle_array
use field_array
implicit none
integer m,i,im,j,k,ip,jt,ii,j11,j10,j01,j00,ierror,ij,ipjt,&
icount,idest,isource,isendtag,irecvtag,istatus(MPI_STATUS_SIZE),jtgc0(me),jtgc1(me)
real(wp) weight,wemark,rdum,tdum,delr,delt(0:mpsi),delz,r,wz1,wz0,wp1,&
wp0,wt11,wt10,wt01,wt00,adum(0:mpsi),tflr,damping,pi2_inv,&
psitmp,thetatmp,zetatmp
real(wp) sendl(mgrid),recvr(mgrid)
real(wp) dnitmp(0:1,mgrid)
real(wp) wzgc(me),wpgc(me),wtgc0(me),wtgc1(me)
delr=1.0/deltar
delt=2.0*pi/deltat
delz=1.0/deltaz
pi2_inv=0.5/pi
densitye=0.0
!$omp parallel do private(m,psitmp,thetatmp,zetatmp,r,ip,jt,ipjt,wz1,&
!$omp& rdum,ii,wp1,tflr,im,tdum,j00,j01)
do m=1,me
psitmp=zelectron(1,m)
thetatmp=zelectron(2,m)
zetatmp=zelectron(3,m)
r=sqrt(2.0*psitmp)
ip=max(0,min(mpsi,int((r-a0)*delr+0.5)))
jt=max(0,min(mtheta(ip),int(thetatmp*pi2_inv*delt(ip)+0.5)))
ipjt=igrid(ip)+jt
wz1=(zetatmp-zeta0)*delz
wzgc(m)=wz1
rdum=delr*max(0.0_wp,min(a1-a0,r-a0))
ii=max(0,min(mpsi-1,int(rdum)))
wp1=rdum-real(ii)
wpgc(m)=wp1
! particle position in theta
tflr=thetatmp
! inner flux surface
im=ii
tdum=pi2_inv*(tflr-zetatmp*qtinv(im))+10.0
tdum=(tdum-aint(tdum))*delt(im)
j00=max(0,min(mtheta(im)-1,int(tdum)))
jtgc0(m)=igrid(im)+j00
wtgc0(m)=tdum-real(j00)
! outer flux surface
im=ii+1
tdum=pi2_inv*(tflr-zetatmp*qtinv(im))+10.0
tdum=(tdum-aint(tdum))*delt(im)
j01=max(0,min(mtheta(im)-1,int(tdum)))
jtgc1(m)=igrid(im)+j01
wtgc1(m)=tdum-real(j01)
enddo
#ifdef _OPENMP
! The following lines are OpenMP directives for loop-level parallelism
! on shared memory machines (see http://www.openmp.org).
! When compiling with the OpenMP option (-qsmp=omp for the XLF compiler on
! AIX and -mp for the MIPSpro compiler on IRIX), the preprocessor variable
! _OPENMP is automatically defined and we use it here to add the pieces of code
! needed to avoid contention between threads. In the following parallel
! loop each thread has its own private array "dnitmp()", which is used to
! prevent the thread from writing into the shared array "densitye()".
!
!$omp parallel private(dnitmp)
dnitmp=0. ! Set array elements to zero
#endif
!$omp do private(m,weight,wz1,wz0,wp1,wp0,wt10,wt00,wt11,wt01,ij)
do m=1,me
weight=zelectron(5,m)
wz1=weight*wzgc(m)
wz0=weight-wz1
wp1=wpgc(m)
wp0=1.0-wp1
wt10=wp0*wtgc0(m)
wt00=wp0-wt10
wt11=wp1*wtgc1(m)
wt01=wp1-wt11
#ifdef _OPENMP
! Use thread-private temporary array dnitmp to store the results
ij=jtgc0(m)
dnitmp(0,ij) = dnitmp(0,ij) + wz0*wt00
dnitmp(1,ij) = dnitmp(1,ij) + wz1*wt00
ij=ij+1
dnitmp(0,ij) = dnitmp(0,ij) + wz0*wt10
dnitmp(1,ij) = dnitmp(1,ij) + wz1*wt10
ij=jtgc1(m)
dnitmp(0,ij) = dnitmp(0,ij) + wz0*wt01
dnitmp(1,ij) = dnitmp(1,ij) + wz1*wt01
ij=ij+1
dnitmp(0,ij) = dnitmp(0,ij) + wz0*wt11
dnitmp(1,ij) = dnitmp(1,ij) + wz1*wt11
#else
! If there is no loop-level parallelism, use the original algorithm (write directly
! into the shared array "densitye()").
ij=jtgc0(m)
densitye(0,ij) = densitye(0,ij) + wz0*wt00
densitye(1,ij) = densitye(1,ij) + wz1*wt00
ij=ij+1
densitye(0,ij) = densitye(0,ij) + wz0*wt10
densitye(1,ij) = densitye(1,ij) + wz1*wt10
ij=jtgc1(m)
densitye(0,ij) = densitye(0,ij) + wz0*wt01
densitye(1,ij) = densitye(1,ij) + wz1*wt01
ij=ij+1
densitye(0,ij) = densitye(0,ij) + wz0*wt11
densitye(1,ij) = densitye(1,ij) + wz1*wt11
#endif
enddo
#ifdef _OPENMP
! For OpenMP, we now write the results accumulated in each thread-private
! array dnitmp() back into the shared array densitye(). The loop is enclosed
! in a critical section so that one thread at a time updates densitye().
!$omp critical
do ij=1,mgrid
densitye(0:1,ij) = densitye(0:1,ij) + dnitmp(0:1,ij)
enddo
!$omp end critical
#endif
!$omp end parallel
! If we have a particle decomposition on the toroidal domains, do a reduce
! operation to add up all the contributions to charge density on the grid
if(npartdom>1)then
!$omp parallel do private(ij)
do ij=1,mgrid
dnitmp(0:1,ij)=densitye(0:1,ij)
densitye(0:1,ij)=0.
enddo
icount=2*mgrid
call MPI_ALLREDUCE(dnitmp,densitye,icount,mpi_Rsize,MPI_SUM,partd_comm,ierror)
endif
! poloidal end cell
do i=0,mpsi
densitye(:,igrid(i)+mtheta(i))=densitye(:,igrid(i)+mtheta(i))+densitye(:,igrid(i))
enddo
! toroidal end cell
sendl=densitye(0,:)
recvr=0.0
icount=mgrid
!!idest=mod(mype-1+numberpe,numberpe)
idest=left_pe
!!isource=mod(mype+1,numberpe)
isource=right_pe
!!isendtag=mype
isendtag=myrank_toroidal
irecvtag=isource
! send densitye to left and receive from right
call MPI_SENDRECV(sendl,icount,mpi_Rsize,idest,isendtag,&
recvr,icount,mpi_Rsize,isource,irecvtag,toroidal_comm,istatus,ierror)
if(myrank_toroidal == mtoroidal-1)then
! B.C. at zeta=2*pi is shifted
do i=0,mpsi
ii=igrid(i)
jt=mtheta(i)
densitye(1,ii+1:ii+jt)=densitye(1,ii+1:ii+jt)+&
cshift(recvr(ii+1:ii+jt),itran(i))
enddo
else
! B.C. at zeta<2*pi is continuous
densitye(1,:)=densitye(1,:)+recvr
endif
! zero out charge in radial boundary cell
do i=0,nbound-1
densitye(:,igrid(i):igrid(i)+mtheta(i))=&
densitye(:,igrid(i):igrid(i)+mtheta(i))*real(i)/real(nbound)
densitye(:,igrid(mpsi-i):igrid(mpsi-i)+mtheta(mpsi-i))=&
densitye(:,igrid(mpsi-i):igrid(mpsi-i)+mtheta(mpsi-i))*real(i)/real(nbound)
enddo
! flux surface average and normalization
zonale=0.0
do i=0,mpsi
do j=1,mtheta(i)
ij=igrid(i)+j
zonale(i)=zonale(i)+densitye(1,ij)
enddo
enddo
!$omp parallel do private(i,j,ij)
do i=0,mpsi
do j=1,mtheta(i)
ij=igrid(i)+j
densitye(1,ij)=densitye(1,ij)*markere(ij)
enddo
enddo
! toroidal sum of phi00, broadcast to every PE
call MPI_ALLREDUCE(zonale,adum,mpsi+1,mpi_Rsize,MPI_SUM,toroidal_comm,ierror)
zonale=adum*pmarke
! densitye subtracted (0,0) mode
!$omp parallel do private(i,j,ij)
do i=0,mpsi
do j=1,mtheta(i)
ij=igrid(i)+j
densitye(1,ij)=densitye(1,ij)-zonale(i)
enddo
! poloidal BC condition
densitye(1,igrid(i))=densitye(1,igrid(i)+mtheta(i))
enddo
! enforce charge conservation for zonal flow mode
rdum=0.0
tdum=0.0
do i=1,mpsi-1
r=a0+deltar*real(i)
rdum=rdum+r
tdum=tdum+r*zonale(i)
enddo
tdum=tdum/rdum
!$omp parallel do private(i)
do i=1,mpsi-1
zonale(i)=zonale(i)-tdum
enddo
end subroutine chargee
Subroutine dataout3d
use global_parameters
use field_array
implicit none
#ifdef ADIOS
! #define ADIOS_WRITE_PATH(a,b,c) call adios_write_path(a,'b'//char(0),b,c//char(0))
! #define ADIOS_WRITE(a,b) call adios_write(a,'b'//char(0),b)
integer*8 group_handle,type_id
integer next,previous,current
integer*8 :: adios_handle, adios_groupsize, adios_totalsize, adios_err
#else
include 'netcdf.inc'
#endif
integer i,j,k,kp,n,ij,istatus,ncid,dataid2(3),dimid(2),dataid1(2)
real(wp) theta_start(mpsi+1,2),data3d(mgrid),radial(mpsi+1),dataout(mgrid,2),&
X(mgrid,2),Y(mgrid,2),Z(mgrid,2),r,theta,zeta,theta0
character(len=1) cdum(3)
character(len=9) vdum
character(len=100) fdum,output_fname,dirstr
! HDF5 declarations
integer :: ierr ! Return error flag
!!first of all, write down the dimension information
#ifdef ADIOS
! Since we have several MPI processes on each plane, only one of these will
! participate in writing out the data for that plane. Let's pick the processes
! with myrank_partd=0 on each plane.
if(myrank_partd==0)then
if(myrank_toroidal==0)then
previous=-1
else
previous=myrank_toroidal-1
endif
current = myrank_toroidal
if(myrank_toroidal==(nproc_toroidal-1))then
next=-1
else
next=myrank_toroidal+1
endif
if(istep==ndiag)then
fdum='phi_dir/RUNcoord.bp'//char(0)
! write(dirstr,'("/Coordinate_tor",i5.5)')myrank_toroidal !!!(mstepall+istep)/ndiag
write(dirstr,'("/Coordinate")') !!!(mstepall+istep)/ndiag
! SAK: changed this to be in a global array
dirstr=trim(dirstr)//char(0)
call adios_open(adios_handle,"output3d.0"//char(0),fdum,"w"//char(0),adios_err)
call adios_set_path (adios_handle,dirstr,adios_err)
! call adios_get_group(type_id,"output3d.0"//char(0))
! call adios_set_path (type_id,dirstr)
! call adios_open(group_handle,type_id,fdum)
! call adios_group_by(type_id,"mype"//char(0),toroidal_comm,previous,current,next)
do i=0,mpsi
radial(i+1)=(a0+deltar*real(i))/rho0
enddo
! Mapping of magnetic coordinates onto the Cartesian grid and write into file.
! We explicitly add the zeta=0 poloidal plane since it will be used by
! the visualization software to connect the torus. myrank_toroidal=0 takes care of
! this, having one extra plane to write into the output file (kp=1).
cdum(1)='X'
cdum(2)='Y'
cdum(3)='Z'
!! if(myrank_toroidal.eq.0)then
!! kp=1 !add the plane zeta=0 to myrank_toroidal=0 (zeta0=0 for myrank_toroidal=0)
!! else
!! kp=0 !no changes for myrank_toroidal > 0
!! endif
kp=0
do k=1,1+kp
zeta=zeta0+deltaz*real(k-kp)
do i=0,mpsi
r=a0+deltar*real(i)
theta_start(i+1,k)=zeta*qtinv(i)
do j=0,mtheta(i)
ij=igrid(i)+j
theta=deltat(i)*real(j)+zeta*qtinv(i)
theta0=theta
X(ij,k)=cos(zeta)*(1.0+r*cos(theta0))
Y(ij,k)=-sin(zeta)*(1.0+r*cos(theta0))
Z(ij,k)=r*sin(theta0)
enddo
enddo
enddo
#include "gwrite_output3d.0.fh"
call adios_close(adios_handle,adios_err)
!!write dimension
! ADIOS_WRITE(group_handle,toroidal_comm)
! ADIOS_WRITE(group_handle,mpsi+1)
! ADIOS_WRITE(group_handle,kp+1)
!!write data
! ADIOS_WRITE(group_handle,myrank_toroidal)
! ADIOS_WRITE(group_handle,mpsi)
! ADIOS_WRITE(group_handle,kp)
! ADIOS_WRITE(group_handle,nproc_toroidal)
! ADIOS_WRITE(group_handle,radial)
! call adios_write(group_handle,"mtheta"//char(0),(mtheta+1))
! ADIOS_WRITE(group_handle,itran)
! ADIOS_WRITE(group_handle,mgrid)
! ADIOS_WRITE(group_handle,X)
! ADIOS_WRITE(group_handle,Y)
! ADIOS_WRITE(group_handle,Z)
! call adios_close(group_handle)
endif !! end if if(istep==ndiag)
! potential data file
kp=0
do k=1,1+kp
do i=0,mpsi
do j=0,mtheta(i)
ij=igrid(i)+j
dataout(ij,k)=phi(k-kp,ij)
enddo
enddo
enddo
write(output_fname,'("phi_dir/PHI_",i5.5,".bp")')mstepall+istep
output_fname=trim(output_fname)//char(0)
! write(dirstr,'("/Potential_tor",i5.5)')myrank_toroidal !!!(mstepall+istep)/ndiag
!SAK
write(dirstr,'("/Potential")') !!!(mstepall+istep)/ndiag
dirstr=trim(dirstr)//char(0)
call adios_open (adios_handle, "output3d.1"//char(0), output_fname,"w"//char(0),adios_err)
call adios_set_path (adios_handle,dirstr,adios_err)
#include "gwrite_output3d.1.fh"
call adios_close (adios_handle,adios_err)
! call adios_get_group (type_id, "output3d.1"//char(0))
! call adios_set_path (type_id,dirstr)
! call adios_open (group_handle, type_id, output_fname)
! ADIOS_WRITE(group_handle,toroidal_comm)
! ADIOS_WRITE(group_handle,kp+1) ! TODO check this for correctness
! ADIOS_WRITE(group_handle,myrank_toroidal)
! ADIOS_WRITE(group_handle,nproc_toroidal)
! ADIOS_WRITE(group_handle,mgrid)
! ADIOS_WRITE(group_handle,kp)
! ADIOS_WRITE(group_handle,mpsi)
! ADIOS_WRITE(group_handle,mpsi+1)
! call adios_write(group_handle,"mtheta"//char(0),(mtheta+1))
! call adios_write (group_handle, "phi"//char(0), dataout)
! call adios_close (group_handle)
endif !!end myrank_partd
! if(myrank_partd==0)then
! The processes not participating in writing to file wait at the following
! barrier. We use the partd_comm communicator since it does not require a
! fully global synchronization.
call MPI_BARRIER(partd_comm,ierr)
#else
!!!netcdf 3d data
if(istep==ndiag)then
if(mype==0)then
do i=0,mpsi
radial(i+1)=(a0+deltar*real(i))/rho0
enddo
! write run-dimension
fdum='phi_dir/RUNdimen.ncd'
! open netcdf data file
istatus=nf_create(trim(fdum),nf_clobber,ncid)
! define data array dimension
istatus=nf_def_dim(ncid,'scalar',1,dimid(1))
istatus=nf_def_dim(ncid,'flux-surfaces',mpsi+1,dimid(2))
! define data array id
istatus=nf_def_var(ncid,'PE-number',nf_int,1,dimid(1),dataid1(1))
istatus=nf_def_var(ncid,'flux-surface-number',nf_int,1,dimid(1),dataid1(2))
istatus=nf_def_var(ncid,'radial-grids',nf_real,1,dimid(2),dataid2(1))
istatus=nf_def_var(ncid,'poloidal-grids',nf_int,1,dimid(2),dataid2(2))
istatus=nf_def_var(ncid,'index-shift',nf_int,1,dimid(2),dataid2(3))
! end of netcdf definition
istatus=nf_enddef(ncid)
! write data
istatus=nf_put_var_int(ncid,dataid1(1),numberpe)
istatus=nf_put_var_int(ncid,dataid1(2),mpsi+1)
istatus=nf_put_var_real(ncid,dataid2(1),radial)
istatus=nf_put_var_int(ncid,dataid2(2),mtheta+1)
istatus=nf_put_var_int(ncid,dataid2(3),itran)
! check error
if (istatus .ne. nf_noerr) then
print *, nf_strerror(istatus)
endif
! close netcdf data file
istatus=nf_close(ncid)
endif
! grid coordinates,each PE writes to a separate file
cdum(1)='X'
cdum(2)='Y'
cdum(3)='Z'
do n=1,3
zeta=zeta1
do i=0,mpsi
r=a0+deltar*real(i)
do j=0,mtheta(i)
ij=igrid(i)+j
theta=deltat(i)*real(j)+zeta*qtinv(i)
theta0=theta
! grid coordinates (x,y,z), use geometry center as the origin
if(n==1)then
data3d(ij)=cos(zeta)*(1.0+r*cos(theta0))
elseif(n==2)then
data3d(ij)=-sin(zeta)*(1.0+r*cos(theta0))
else
data3d(ij)=r*sin(theta0)
endif
enddo
enddo
! coordinate data file name
if(myrank_toroidal < 10)then
write(fdum,'("phi_dir/NCD",a1,"coor.00",i1)')cdum(n),myrank_toroidal
elseif(myrank_toroidal < 100)then
write(fdum,'("phi_dir/NCD",a1,"coor.0",i2)')cdum(n),myrank_toroidal
else
write(fdum,'("phi_dir/NCD",a1,"coor.",i3)')cdum(n),myrank_toroidal
endif
! variable name
write(vdum,'(a1,"coordina")')cdum(n)
! open netcdf data file
istatus=nf_create(trim(fdum),nf_clobber,ncid)
! define data array dimension
istatus=nf_def_dim(ncid,'poloidal_grid',mgrid,dimid(1))
istatus=nf_def_dim(ncid,'toroidal_grid',1,dimid(2))
! define data array id
istatus=nf_def_var(ncid,vdum,nf_real,2,dimid,dataid1(1))
! end of netcdf definition
istatus=nf_enddef(ncid)
! write data
istatus=nf_put_var_real(ncid,dataid1(1),data3d)
istatus=nf_close(ncid)
enddo
endif
! potential data file
if(myrank_partd==0)then
do i=0,mpsi
do j=0,mtheta(i)
ij=igrid(i)+j
data3d(ij)=phi(1,ij)
enddo
enddo
write(fdum,'("phi_dir/PHI_",i0,"_",i0,".ncd")')(mstepall+istep),myrank_toroidal
! variable name
write(vdum,'("Potential")')
! open netcdf data file
istatus=nf_create(trim(fdum),nf_clobber,ncid)
! define data array dimension
istatus=nf_def_dim(ncid,'poloidal_grid',mgrid,dimid(1))
istatus=nf_def_dim(ncid,'toroidal_grid',1,dimid(2))
! define data array id
istatus=nf_def_var(ncid,vdum,nf_real,2,dimid,dataid1(1))
! end of netcdf definition
istatus=nf_enddef(ncid)
! write data
istatus=nf_put_var_real(ncid,dataid1(1),data3d)
istatus=nf_close(ncid)
endif
#endif
end subroutine dataout3d
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine eqplot
use global_parameters
use equilibrium
implicit none
integer i,j,neq,ieq,nplot
real dum,spdum,psi(lsp),datap(lsp),theta(lst),datat(lst),datax(lsp,lst),&
dataz(lsp,lst),data(lsp,lst)
real,external :: splcos,splsin
ieq=120 !open equilibrium plot output file
open(ieq,file='equilibrium.out',status='replace')
101 format(i6)
102 format(e16.8)
!# of 1D radial plots
nplot=19
write(ieq,101)nplot,lsp
!radial axis using poloidal flux function
do i=1,lsp
psi(i)=spdpsi*real(i-1)
enddo
write(ieq,102)psi
!1: square-root of normalized toroidal flux function
datap=stpp(:)
write(ieq,102)datap
!2: minor radius
datap=mipp(:)
write(ieq,102)datap
!3: major radius
datap=mapp(:)
write(ieq,102)datap
!4: Te
datap=tepp(1,:)
write(ieq,102)datap
!5: ne
datap=nepp(1,:)
write(ieq,102)datap
!6: ti
datap=tipp(1,:)
write(ieq,102)datap
!7: zeff
datap=zepp(1,:)
write(ieq,102)datap
!8: toroidal rotation
datap=ropp(1,:)