###############################################################################
# CCCjobs is used to define a hash containing commonly used job strings
#
# $Id: CCCjobs.pm 656 2011-12-28 19:51:43Z acrnrls $
###############################################################################
package CCCjobs;

# Export machinery: callers import %JOBDEF (job-string templates keyed by
# job name), $RCM_PARAMS and %diag_deck (diagnostic-deck ordering info).
require Exporter;

# Declare the package globals with 'our' so they are explicit; the lexical
# aliases remain in scope for the remainder of this file.
# NOTE(review): 'use strict; use warnings;' would be the next step, but only
# once every package global used further down this file is declared -- TODO
# confirm against the full file before enabling.
our @ISA       = qw(Exporter);
our @EXPORT    = qw(%JOBDEF $RCM_PARAMS %diag_deck);   # exported by default
our @EXPORT_OK = qw(%JOBDEF $RCM_PARAMS %diag_deck);   # also importable on request

# Up-front declaration of the exported data structures themselves.
our (%JOBDEF, $RCM_PARAMS, %diag_deck);

###############################################################################
# The JOBDEF hash contains internal job definitions used to create job strings
# "on the fly" with cccjob using command line directives.
###############################################################################
# The diag_deck hash contains info about diagnostic decks, which ones to use
# and in what order.
###############################################################################

###############################################################################
####################             t_cpl_t63                 ####################
###############################################################################
# ----------------------------------------------------------------------------
# t_cpl_t63: complete /bin/sh job string for the T63 31-level coupled model.
# The q@...@ delimiter does NOT interpolate, so every $var below is literal
# text; cccjob performs its own substitutions on this string when a job is
# generated.  The body is runtime data, not Perl code -- do not edit casually.
# ----------------------------------------------------------------------------
$JOBDEF{job}{t_cpl_t63} =
q@#!/bin/sh
set -a

. betapath2   # sets path for beta test version of new diagnostics package

#  * ..................... Definition of Global Parameters .....................

runid="job000"; uxxx='uxxx'; prefix="${uxxx}_${runid}"; crawork=${runid}_job;
RUNID=`echo "${runid}"|tr '[a-z]' '[A-Z]'`;
username="acrnxxx"; user="XXX"; nqsext="_${runid}";

#  * ........................... Parmsub Parameters ............................

modver=gcm13d
gcmparm=gcmparm ; stime=900 ; memory1=1000mb
lopgm=lopgm_r8i8; oldiag=diag4;

#  * ............................ Condef Parameters ............................

script=off
noprint=on
nextjob=on
samerun=on
openmp=on
nproc_a=8
nnode_a=2
nproc_o=8
nnode_o=1
parallel_io=off
coupled=on

#  * ............................. Deck Definition .............................

. gcmsub.dk
cat > Model_Input <<'end_of_data'

### gcmparm ###

### parmsub ###

 . betapath2  # sets path for beta test version of new diagnostics package

#  * ..................... Definition of Global Parameters .....................

 runid="job000"; uxxx='uxxx'; prefix="${uxxx}_${runid}"; crawork=${runid}_job;
 RUNID=`echo "${runid}"|tr '[a-z]' '[A-Z]'`;
 username="acrnxxx"; user="XXX"; nqsext="_${runid}";

#  * ........................... Parmsub Parameters ...........................

 year=1982; mon=04; year_restart=1982; mon_restart=04; kfinal="         0";
 model=${prefix}_${year}_m${mon}_;
 start=${prefix}_${year_restart}_m${mon_restart}_;

 run="${RUNID}_${year}_M${mon}";

 initime=900; initmem="1000mb";
 gcmtime=10800; gcmmem="8000mb";

 iyear=" 1979"; iday="  001"; gmt=" 0.00"; incd="    1";

 lopgm=lopgm_r8i8;
 iocean="00001"; isoc="    1"; ires="    1"; resid="udr_maxe_res";
 ifdiff="    0"; isen="    1";
 issp="   48"; isgg="   48"; israd="   96"; isbeg="   96";
 jlatpr="    0";icsw="    4"; iclw="   24";  iepr="    0";  istr="   96";
 llphys=iph15lp;
 gpinit=ie791gp;
 inatmo=inatmo11;
 inphys=inphys13;
 llchem=ipchemlp;
 oxi=iphoxlp;
 mtnfile=opt_phis_t63;
 maskfile=gcm3_landmask_128x64v5;
 subfle=etopo5_gau_020_opt_200_129_64;

#  * .................... Begin Coupled Model Parameters ...........................
#
#     ocnmod: 0 = specified ocean temperature
#             1 = slab ocean
#             2 = 3D ocean model
#
#     icemod: 0 = specified ice
#             1 = thermodynamic + cavitating ice dynamics
#
#     rivmod: 0 = no targetting of land runoff to oceans
#             1 = targetting of land runoff using River Routing Scheme
#
#      iflux: 0 = adaptation (climatology file required)
#             1 = flux adjustment (flux correction file required)
#             2 = no flux correction
#             3 = spin-up by nudgingto OBSERVATIONAL SST
#
#  couplermod 0 = Standard mode
#             1 = OGCM/Coupler only mode
#
    ocnmod="00002";
    icemod="00001";
    rivmod="00001";
     iflux="00002";
couplermod="00000";
#
# --------------------- Time Parameters --------------------
#
# ncount:   Maximum number of coupling step before CGCM  stops and then
#           resubmits itself to the NQS queue.
#
# atmsteps: Coupling Frequency for the Atmospheric model (in seconds)
#
# ocnsteps: Coupling Frequency for the Ocean model (in seconds)
#
# ksteps:   Number of AGCM model time steps in a job (must be equal to
#           length of atmsteps)
#
   months=1;
   loop=1;
#
   ncount="00004";
   atmsteps="  86400";
   ocnsteps="  86400";

   ksteps="   96";
#     ksteps=atmsteps/delt = 86400/900 = 96
# -----------------------------------------------------------
#
 if [ "$ocnmod" = "00001" ] ; then
   oceanslab=on;
 fi

 if [ "$ocnmod" = "00002" ] ; then
   oceanslab=off;
 fi

 if [ "$couplermod" = "00001" ] ; then
   oceanonly=on;
 else
   oceanonly=off;
 fi

 if [ "$iflux" = "00000" ] ; then
    flux="udr_cgcm3.1_adaptation_256x192l29v1"
 elif [ "$iflux" = "00001" ] ; then
    flux="mc_xi_flux_year";
 elif [ "$iflux" = "00002" ] ; then
    flux="junk";
 elif [ "$iflux" = "00003" ] ; then

#   FORECAST RUN: SPINUP MODE 
#
#   reads PCMDI OBS SST, BUT SALT and SIC are from climatogical
#   annual cycle of "ubp_cgcm3.1_adaptation_256x192l29v1_salt_sic".
#
#   NOTE: USING ADAPTATION TECHNOLOGY BUT IT USES OBSERVATIONAL
#         EVOLVING TIME SERIES OF MONTHLY MEAN SST INSTEAD OF ITS
#         CLIMATOLOGICAL ANNUAL CYCLE.  
#    NOTE: data files are passed to gcmjcl.dk as an update (see update section) 
#
     yrm1=`expr $year - 1 | $AWK '{printf "%4d", $0}'`
     yrp1=`expr $year + 1 | $AWK '{printf "%4d", $0}'`
     yymm1="${yrm1}120100"; yymm2="${yrp1}010100"; int="   1";
     amipsst="ubp_amip2a_amipobs_sst_oc_256x192_1956_2003";
     fclim="ubp_cgcm3.1_adaptation_256x192l29v1_salt_sic";
  else
     flux=invalid;
  fi

  target=gcm3_128x64_v5_river_parameters_v2;
  lakes=gcm3_128x64_v5_lake_parameters_v2;
#
#  * ...................... End Coupled Model Parameters ...........................


# Required parmsub parameters for gcmparm
# (* denotes a critical parameter which must appear in the ##PARC section)
#ex   * ilev   = number of vertical model    levels                (e.g.  35)
#ex   * levs   = number of vertical moisture levels                (e.g.  35)
#ex   * lonsld = number of longitudes         on the dynamics grid (e.g. 144)
#ex   * nlatd  = number of gaussian latitudes on the dynamics grid (e.g.  72)
#ex     lond   = number of grid points per slice for dynamics      (e.g. 144)
#ex   * lonsl  = number of longitudes         on the physics  grid (e.g.  96)
#ex   * nlat   = number of gaussian latitudes on the physics grid  (e.g.  48)
#ex     lon    = number of grid points per slice for physics       (e.g.  96)
#ex   * lmt    = spectral truncation wave number                   (e.g.  47)
#ex     ioztyp = switch for input ozone distribution               (e.g.   2)
#ex   * ntrac  = total number of tracers in the model              (e.g.  17)
#ex   * itraca = number of advected tracers in the model           (e.g.  12)
#ex     nnode_a= number of smp nodes used to run atm (mpi for nnode_a>1)
#
#       nnode_a is defined before gcmsub at the top of this script
#
    lon="  128";
   lond="  192";
 ioztyp="    2";

#  * .................... Begin Critical Parameters ...........................
##PARC

   ilev="   31"     ;   levs="   31"     ;    lmt="   63"     ;
  nlatd="   96"     ; lonsld="  192"     ;
   nlat="   64"     ;  lonsl="  128"     ;
  ntrac="    0"     ; itraca="    0"     ;
  coord=" ET15"     ;   plid="      50.0";
  moist=" QHYB"     ; itrvar="QHYB"      ;
   delt="    900.0" ;    lay="    2"     ;

 g01="-0100";  g02="-0420";  g03="   12";  g04="   23";  g05="   41";
 g06="   68";  g07="  104";  g08="  148";  g09="  197";  g10="  261";
 g11="  329";  g12="  398";  g13="  466";  g14="  531";  g15="  592";
 g16="  648";  g17="  698";  g18="  742";  g19="  780";  g20="  813";
 g21="  841";  g22="  865";  g23="  885";  g24="  902";  g25="  916";
 g26="  930";  g27="  944";  g28="  958";  g29="  972";  g30="  986";
 g31="  995";  g32="     ";  g33="     ";  g34="     ";  g35="     ";
 g36="     ";  g37="     ";  g38="     ";  g39="     ";  g40="     ";
 g41="     ";  g42="     ";  g43="     ";  g44="     ";  g45="     ";
 g46="     ";  g47="     ";  g48="     ";  g49="     ";  g50="     ";
 h01="-0100";  h02="-0420";  h03="   12";  h04="   23";  h05="   41";
 h06="   68";  h07="  104";  h08="  148";  h09="  197";  h10="  261";
 h11="  329";  h12="  398";  h13="  466";  h14="  531";  h15="  592";
 h16="  648";  h17="  698";  h18="  742";  h19="  780";  h20="  813";
 h21="  841";  h22="  865";  h23="  885";  h24="  902";  h25="  916";
 h26="  930";  h27="  944";  h28="  958";  h29="  972";  h30="  986";
 h31="  995";  h32="     ";  h33="     ";  h34="     ";  h35="     ";
 h36="     ";  h37="     ";  h38="     ";  h39="     ";  h40="     ";
 h41="     ";  h42="     ";  h43="     ";  h44="     ";  h45="     ";
 h46="     ";  h47="     ";  h48="     ";  h49="     ";  h50="     ";

# namelists

   sref="  10.00E-3";   spow="        1.";
   ilaun=0;

#  * ...................... End Critical Parameters ...........................

### condef ###

initsp=off
openmp=on
coupled=on
noprint=on
nextjob=on
nolist=off
acctcom=on
xmu=off
pakjob=off
debug=off
noxlfimp=off
samerun=on

### update script ###
 
%c gcmjcl
%d gcmjcl.672
%i gcmjcl.673
if [ "$coupled" = on ] ; then
 set -x
 if [ "$iflux" = "00003" ] ; then
   if [ ! -s FLUX ] ; then
 #   FORECAST RUN: SPINUP MODE
 #   reads PCMDI BCS SST which have been inflated; SALT and SIC are from climatogical
 #   annual cycle of "ubp_cgcm3.1_adaptation_256x192l29v1_salt_sic".
 #   yrm1=`expr $year - 1 | $AWK '{printf "%4d", $0}'`
 #   yrp1=`expr $year + 1 | $AWK '{printf "%4d", $0}'`
 #   yymm1="${yrm1}12"; yymm2="${yrp1}01"; int="   1";
 
      access   amipbc $amipsst
 
 ccc  select   amipbc    gto <<SCARD
 SELECT        ${yymm1}${yymm2} $int LEVS    1 1000 NAME AMGT
SCARD
      release  amipbc
 
 #....climatological annual cycle data for salt and SIC
      access  fclim  $fclim
 
      joinup  FLUX gto  fclim
      ggstat  FLUX 
      release gto  fclim 
   fi
 else
   if [ ! -s FLUX ] ; then
     access FLUX    ${flux}  na
   fi
 fi
fi
%d gcmjcl.1037
if [ "$iflux" = "00003" ] ; then
  rm -f OGCM TARGET LAKES
else
   rm -f OGCM FLUX TARGET LAKES
fi

end_of_data

. endjcl.cdk

#end_of_job
@;
# One-line description shown in cccjob listings; "hide" keeps this entry out
# of the normal user-visible job list.
$JOBDEF{description}{t_cpl_t63} = "coupled model job string -- T63 31 levels";
$JOBDEF{hide}{t_cpl_t63} = "yes";  # special coupled script for Badal
#******************************************************************************
#*******************          END of t_cpl_t63             ********************
#******************************************************************************

###############################################################################
####################              diag2nc                  ####################
###############################################################################
# ----------------------------------------------------------------------------
# diag2nc: /bin/sh job string that converts CCCma-format diagnostic files
# (gp/xp) to netCDF via the external ccc2nc tool.  The q@...@ body is a
# non-interpolating literal; cccjob substitutes the NotSet year/month
# placeholders when the job string is created.  Runtime data -- do not edit
# the body casually.
# ----------------------------------------------------------------------------
$JOBDEF{job}{diag2nc} =
q@#!/bin/sh
#=======================================================================
# Create netcdf files from diagnostic files.
# Only the superlabeled variables GT PMSL RH U V W Q T GZ VORT PCP are
# put into the netcdf file
# This requires that ccc2nc be available on your PATH
#=======================================================================

set -a
. betapath2

runid=job000; crawork="${runid}_job";
username="acrnxxx"; user="CCCma";

nextjob=on
noprint=off

jobname=diag2nc; time="900" ; memory="900mb" ;

. comjcl.cdk

cat > Execute_Script <<'end_of_script'

which ccc2nc || exit 1

username="acrnxxx"; user="CCCma"; crawork="${runid}_job";
noprint=off

# Create a list of gp and xp file names beginning with $prefix
# for the years from $diag2nc_start_year to $diag2nc_end_year

runid=aaa; uxxx=uxxx; diag2nc_uxxx=$uxxx
diag2nc_prefix=${diag2nc_uxxx}_${runid}; prefix=$diag2nc_prefix

# These variables are set when the job string is created
 current_year=NotSet
 current_month=NotSet
 previous_year=NotSet
 previous_month=NotSet
 next_year=NotSet
 next_month=NotSet
 if [ $current_year = "NotSet" -o $current_month = "NotSet" ]; then
   echo "diag2nc: current_year or current_month is not set"
   exit 1
 fi

 # files loaded from cfs will be for months in the range from
 # diag2nc_start_year,diag2nc_start_mon to diag2nc_end_year,diag2nc_end_mon
 if [ $previous_year = "NotSet" -o $previous_month = "NotSet" ]; then
   if [ $next_year = "NotSet" -o $next_month = "NotSet" ]; then
     echo "diag2nc: Neither previous_(year|month) nor next_(year|month) are set"
     exit 1
   else
     diag2nc_start_year=$current_year
     diag2nc_start_mon=$current_month
     diag2nc_end_year=$next_year
     diag2nc_end_mon=$next_month
   fi
 else
   diag2nc_start_year=$previous_year
   diag2nc_start_mon=$previous_month
   diag2nc_end_year=$current_year
   diag2nc_end_mon=$current_month
 fi

 diag2nc_start_year=`echo $diag2nc_start_year|awk '{printf "%3.3d",$1}' -`
 diag2nc_start_mon=`echo $diag2nc_start_mon|awk '{printf "%2.2d",$1}' -`
 diag2nc_end_year=`echo $diag2nc_end_year|awk '{printf "%3.3d",$1}' -`
 diag2nc_end_mon=`echo $diag2nc_end_mon|awk '{printf "%2.2d",$1}' -`

 if [ -z "$diag2nc_start_year" ]; then
   echo "diag2nc: diag2nc_start_year is null"
   exit 1
 fi
 if [ -z "$diag2nc_end_year" ]; then
   echo "diag2nc: diag2nc_end_year is null"
   exit 1
 fi
 if [ -z "$diag2nc_start_mon" ]; then
   echo "diag2nc: diag2nc_start_mon is null"
   exit 1
 fi
 if [ -z "$diag2nc_end_mon" ]; then
   echo "diag2nc: diag2nc_end_mon is null"
   exit 1
 fi
 if [ $diag2nc_start_mon -gt 12 -o $diag2nc_start_mon -lt 1 ]; then
   echo "diag2nc: diag2nc_start_mon=$diag2nc_start_mon is out of range"
   exit 1
 fi
 if [ $diag2nc_end_mon -gt 12 -o $diag2nc_end_mon -lt 1 ]; then
   echo "diag2nc: diag2nc_end_mon=$diag2nc_end_mon is out of range"
   exit 1
 fi
 if [ $diag2nc_start_year -gt $diag2nc_end_year ]; then
   echo "diag2nc: diag2nc_start_year=$diag2nc_start_year is out of range"
   exit 1
 fi

flist=''
curr_year=`echo $diag2nc_start_year|awk '{y=$1-1;printf "%3.3d", y}' -`
while [ $curr_year -lt $diag2nc_end_year ]; do
  curr_year=`echo $curr_year|awk '{y=1+$1;printf "%3.3d", y}' -`
  if [ $curr_year -ne $diag2nc_start_year ]; then
    flist="$flist ${prefix}_${curr_year}_djf_gp ${prefix}_${curr_year}_djf_xp"
  fi
  flist="$flist ${prefix}_${curr_year}_jja_gp ${prefix}_${curr_year}_jja_xp"
done

# Extract specific variables from each diagnostic file
# and create netcdf files containing these variables
for cccnm in $flist; do
  gp=`echo $cccnm|sed -n '/gp$/p'`
  xp=`echo $cccnm|sed -n '/xp$/p'`
  if [ x"$xp" != x ]; then
    ftype=xp
  elif [ x"$gp" != x ]; then
    ftype=gp
  else
    ftype=''
    echo "ftype is not supported for file $cccnm"
    continue
  fi
  access DIAG $cccnm
  if [ "$ftype" = "gp" ]; then
    ccc xfind DIAG GTx PSx Ux Vx Wx Qx Tx GZx VORTx PCPx <<EOF
  XFIND   1   GT
  XFIND   1   PMSL
  XFIND   1   U
  XFIND   1   V
  XFIND   1   W
  XFIND   1   Q
  XFIND   1   T
  XFIND   1   GZ
  XFIND   1   VORT
  XFIND   1   PCP
EOF
  elif [ "$ftype" = "xp" ]; then
    ccc xfind DIAG GTx PSx Ux Vx Wx Qx Tx GZx VORTx PCPx <<EOF
  XFIND   1   (GT)
  XFIND   1   (PS)
  XFIND   1   (U)R
  XFIND   1   (V)R
  XFIND   1   (W)R
  XFIND   1   (Q)R
  XFIND   1   (T)R
  XFIND   1   (GZ)R
  XFIND   1   (VORT)R
  XFIND   1   (PCP)
EOF
  else
    echo "$cccnm is not a diagnostic file (*gp or *xp)"
    continue
  fi
  rm -f DIAG

  rm -f OMEG PHI PMSL RHUM SHUM TEMP PCP

  ccc newnam GTx GT <<EOF
    NEWNAM   GT
EOF
  ccc newnam PSx PS <<EOF
    NEWNAM   PS
EOF
#  ccc newnam RHx RH <<EOF
#    NEWNAM   RH
#EOF
  ccc newnam Ux U <<EOF
    NEWNAM    U
EOF
  ccc newnam Vx V <<EOF
    NEWNAM    V
EOF
  ccc newnam Wx W <<EOF
    NEWNAM    W
EOF
  ccc newnam Qx Q <<EOF
    NEWNAM    Q
EOF
  ccc newnam Tx T <<EOF
    NEWNAM    T
EOF
  ccc newnam GZx GZ <<EOF
    NEWNAM   GZ
EOF
  ccc newnam VORTx VORT <<EOF
    NEWNAM VORT
EOF
  ccc newnam PCPx PCP <<EOF
    NEWNAM PCP
EOF
  rm -f GTx PSx Ux Vx Wx Qx Tx GZx VORTx PCPx

  joinup $cccnm GT PS U V W Q T GZ VORT PCP
  rm -f GT PS U V W Q T GZ VORT PCP

  ccc2nc $cccnm
  if [ -s ${cccnm}.nc ]; then
    chmod a+r ${cccnm}.nc
    save ${cccnm}.nc ${cccnm}.nc
  fi
  rm -f $cccnm GT PS U V W Q T GZ VORT PCP
done

end_of_script

. endjcl.cdk

#end_of_job
@;
# Listing metadata: short description plus the flag hiding this entry from
# the normal job list.
$JOBDEF{description}{diag2nc} = "convert CCCma format to netcdf";
$JOBDEF{hide}{diag2nc} = "yes";
#******************************************************************************
#*******************          END of diag2nc               ********************
#******************************************************************************

###############################################################################
####################               try1                    ####################
###############################################################################
# ----------------------------------------------------------------------------
# try1: hidden debugging job string.  The body deliberately contains
# pathological shell constructs (unbalanced assignments, nested $( ),
# multi-line quotes, misspelled commands) -- presumably to exercise cccjob's
# parser/substitution logic, so its exact bytes must be preserved.
# TODO confirm with cccjob's test procedure before changing anything here.
# ----------------------------------------------------------------------------
$JOBDEF{job}{try1} =
q@#!/bin/sh
#************************************************************************
#****************************  try 1 test job  **************************
#************************************************************************
username="acrnxxx"; user="CCCma";
runid="job000"; prefix="uxxx_${runid}"; crawork=${runid}_job;
nqsext="_${runid}";
RUNID=`echo "${runid}"|tr '[a-z]' '[A-Z]'`;

cat > Model_Input <<'end_of_data'

### gcmparm ###

### parmsub ###

 . betapath2

#  * ..................... Definition of Global Parameters .....................

initsp=;  samerun=$oldsame
year="xxx"; mon=yyy; year_restart=; mon_restart=; kfinal=
days=; obsday=
mon1=12; mon2=01; mon3=02

monn=

runid=123 prefix=abc

tv=`backtics`; tv='single quotes'; tv="double quotes"; tv=yada; tv=${scolp}
 tv=`backtics`
 tv='single quotes'
 tv="double quotes"
 tv=
 tv=yada
tv='la la la
    ha ha ha
    yadda yadda yadda'
 tw=1.345
 zz=4;
 tv=$($(ls);$(hostanme $(blah $(haha ))));
 tv=
 # tv=(this is a (lalala) sub shell)  tv=[this is [lalala] ]  tv={this is {lalala}}

 test_var=$($(ls);$(hostanme $(blah $(haha )))); test_var2=
 test_var=`$[$[ls];$[hostanme $[blah $[haha ]]]]`; # hello
 test_var2='cde;def'  ;
### update model ###

%c gcm15e

%i xxx
%d yyy

### update sub ###

%d aaa
%i bbb
end_of_data

. endjcl.cdk

#end_of_job
@;
# Listing metadata; hidden from the normal job list.
$JOBDEF{description}{try1} = "debugging module 1";
$JOBDEF{hide}{try1} = "yes";
#******************************************************************************
#*******************           END of try1                 ********************
#******************************************************************************

###############################################################################
####################                try2                   ####################
###############################################################################
# ----------------------------------------------------------------------------
# try2: hidden debugging job string.  Contains <<INSERT>> placeholder markers
# (presumably expanded by cccjob) and an embedded exec-to-perl snippet; the
# q@...@ literal does not interpolate, so $x below is literal text.
# ----------------------------------------------------------------------------
$JOBDEF{job}{try2} =
q@#!/bin/sh
#************************************************************************
#****************************  try 2 test job  **************************
#************************************************************************

set -a
. betapath2

username="acrnxxx"; user="XXX"; uxxx='uxxx'
runid=xtune;  crawork="${runid}_job"

nextjob=on
noprint=off

jobname=try2; time="600" ; memory="900mb" ;

# <<INSERT>>
# <<INSERT 0 >>
# <<INSERT 1 >>
# <<INSERT 2 >>
# <<INSERT 3 >>
# <<INSERT>>
# <<INSERT 4 >>
# <<INSERT 5 >>
# <<INSERT>>

. comjcl.cdk

cat > Execute_Script <<'end_of_script'

## parmsub

username="acrnxxx"; user="XXX"; uxxx='uxxx'
runid=xtune;  crawork="${runid}_job"

#  * ........................... Parmsub Parameters ............................

exec /usr/bin/perl

$x = 45;
print "x=$x\n";

end_of_script

cat > Model_Input <<'end_of_data'
end_of_data

. endjcl.cdk

#end_of_job
@;
# Listing metadata; hidden from the normal job list.
$JOBDEF{description}{try2} = "debugging module 2";
$JOBDEF{hide}{try2} = "yes";
#******************************************************************************
#*******************           END of try2                 ********************
#******************************************************************************

###############################################################################
####################                dummy                   ####################
###############################################################################
# ----------------------------------------------------------------------------
# dummy: placeholder job string.  The NotSet values are substituted by cccjob
# when a job string is generated; the script itself just resolves host
# aliases and echoes.  q@...@ is non-interpolating -- the body is literal.
# ----------------------------------------------------------------------------
$JOBDEF{job}{dummy} =
q@#!/bin/sh
#************************************************************************
#****************************   Place holder   **************************
#************************************************************************
 set -a
 . betapath2

 #============================================
 months=NotSet
 #============================================

 username="acrnxxx"; user="XXX"; uxxx='uxxx'
 runid=xtune;  crawork="${runid}_job"
 RUNID=XTUNE

 noprint=off
 nextjob=on

 kfinal=NotSet

          year=NotSet;             mon=NotSet
  year_restart=NotSet;     mon_restart=NotSet

 previous_year=NotSet;  previous_month=NotSet
  current_year=NotSet;   current_month=NotSet
     next_year=NotSet;      next_month=NotSet

 initsp=NotSet;  samerun=NotSet
 days=NotSet;  obsday=NotSet
 mon1=NotSet;  mon2=NotSet;  mon3=NotSet;

 jobname=dummy; time="100" ; memory="900mb" ;

 . comjcl.cdk

cat > Execute_Script <<'end_of_script'

  runid=job000
  crawork="${runid}_job"
  noprint=off
  nextjob=on

  # These variables are set when the job string is created
  run_start_year=NotSet
  run_start_month=NotSet
  run_stop_year=NotSet
  run_stop_month=NotSet
  current_year=NotSet
  current_month=NotSet
  previous_year=NotSet
  previous_month=NotSet
  next_year=NotSet
  next_month=NotSet

  this_host=`uname -n|awk -F\. '{print \$1}' -`
  case $this_host in
    # aliases for powerX clusters
    c1h*) this_host=spica ;;
    c2h*) this_host=hadar ;;
    c1*) this_host=azur ;;
    c3*) this_host=rigel ;;
    c4*) this_host=maia ;;
    c5*) this_host=naos ;;
  esac
  echo "This machine: $this_host"

  dummy_line=''
  [ -n "$dummy_line" ] && echo "$dummy_line"

  echo "End dummy"

end_of_script

 . endjcl.cdk

#end_of_job
@;
# Listing metadata; hidden from the normal job list.
$JOBDEF{description}{dummy} = "place holder";
$JOBDEF{hide}{dummy} = "yes";
#******************************************************************************
#*******************           END of dummy                ********************
#******************************************************************************

###############################################################################
####################                mkdhist                ####################
###############################################################################
$JOBDEF{job}{mkdhist} =
q@#!/bin/sh
#************************************************************************
#**********************  make dummy history files  **********************
#************************************************************************

set -a
 . betapath2

 username="acrnxxx"; user="XXX"; uxxx='uxxx'
 runid=xyz;  crawork="${runid}_job"

 nextjob=on
 noprint=on

 coupled=on

 jobname=mkdhist; mkdhist_time=1800; time=$mkdhist_time; memory="900mb"

 # Allow the user to reset CCRNTMP and/or RUNPATH
 mkdhist_RUNPATH=''
 RUNPATH=${mkdhist_RUNPATH:=$RUNPATH}
 mkdhist_CCRNTMP=''
 CCRNTMP=${mkdhist_CCRNTMP:=$CCRNTMP}

 # Alternate path to a directory where .queue/.crawork will be found
 JHOME=''

 if [ -n "$JHOME" -a x"$JHOME" != x"$HOME" ]; then
   # Allow optional reset of DATAPATH/RUNPATH
   JHOME_DATA=''
   DATAPATH=${JHOME_DATA:=$DATAPATH}
   RUNPATH=${JHOME_DATA:=$RUNPATH}
   # Allow optional reset of CCRNTMP
   JHOME_RUN=''
   CCRNTMP=${JHOME_RUN:=$CCRNTMP}
 fi

 . comjcl.cdk

cat > Execute_Script <<'end_of_script'

  # bail is a simple error exit routine
  bail(){
    echo "mkdhist: "$1
    echo " " >> haltit
    exit 1
  }

#  * ........................... Parmsub Parameters ............................

 if [ -n "$JHOME" -a x"$JHOME" != x"$HOME" ]; then
   # Allow optional reset of DATAPATH/RUNPATH
   JHOME_DATA=''
   DATAPATH=${JHOME_DATA:=$DATAPATH}
   RUNPATH=${JHOME_DATA:=$RUNPATH}
   # Allow optional reset of CCRNTMP
   JHOME_RUN=''
   CCRNTMP=${JHOME_RUN:=$CCRNTMP}
 fi

 # If clone is defined it must be a file name (without the trailing _mXX_gs)
 # Files with this prefix will be copied to new names defined by the current
 # uxxx, runid and year range. If any of these files do not exist the job
 # will fail.
 mkdhist_clone=''
 eval clone\=$mkdhist_clone

 runid="job000"; uxxx='uxxx'; mkdhist_uxxx=$uxxx;
 jobname=mkdhist; mkdhist_prefix="${mkdhist_uxxx}_${runid}"
 crawork="${runid}_job"; username="acrnxxx"; user="XXX";

 # These variables are set when the job string is created
 run_start_year=NotSet  # memory99=1
 run_start_month=NotSet # memory99=1
 run_stop_year=NotSet   # memory99=1
 run_stop_month=NotSet  # memory99=1
 current_year=NotSet    # memory99=1
 current_month=NotSet   # memory99=1
 previous_year=NotSet   # memory99=1
 previous_month=NotSet  # memory99=1
 next_year=NotSet       # memory99=1
 next_month=NotSet      # memory99=1
 if [ $current_year = "NotSet" -o $current_month = "NotSet" ]; then
   bail "mkdhist: current_year or current_month is not set"
 fi

 # files copied will be for months in the range from
 # mkdhist_start_year,mkdhist_start_mon to mkdhist_end_year,mkdhist_end_mon
 if [ $previous_year = "NotSet" -o $previous_month = "NotSet" ]; then
   if [ $next_year = "NotSet" -o $next_month = "NotSet" ]; then
     bail"mkdhist: Neither previous_(year|month) nor next_(year|month) are set"
   else
     mkdhist_start_year=$current_year
     mkdhist_start_mon=$current_month
     mkdhist_end_year=$next_year
     mkdhist_end_mon=$next_month
   fi
 else
   mkdhist_start_year=$previous_year
   mkdhist_start_mon=$previous_month
   mkdhist_end_year=$current_year
   mkdhist_end_mon=$current_month
 fi

 mkdhist_start_year=`echo $mkdhist_start_year|awk '{printf "%3.3d",$1}' -`
 mkdhist_start_mon=`echo $mkdhist_start_mon|awk '{printf "%2.2d",$1}' -`
 mkdhist_end_year=`echo $mkdhist_end_year|awk '{printf "%3.3d",$1}' -`
 mkdhist_end_mon=`echo $mkdhist_end_mon|awk '{printf "%2.2d",$1}' -`

 [ -z "$mkdhist_start_year" ] && bail "mkdhist: mkdhist_start_year is null"
 [ -z "$mkdhist_end_year" ]   && bail "mkdhist: mkdhist_end_year is null"
 [ -z "$mkdhist_start_mon" ]  && bail "mkdhist: mkdhist_start_mon is null"
 [ -z "$mkdhist_end_mon" ]    && bail "mkdhist: mkdhist_end_mon is null"

 [ $mkdhist_start_mon -gt 12 -o $mkdhist_start_mon -lt 1 ] &&\
   bail "mkdhist: mkdhist_start_mon = $mkdhist_start_mon is out of range"
 [ $mkdhist_end_mon -gt 12 -o $mkdhist_end_mon -lt 1 ] &&\
   bail "mkdhist: mkdhist_end_mon = $mkdhist_end_mon is out of range"
 [ $mkdhist_start_year -gt $mkdhist_end_year ] &&\
   bail "mkdhist: mkdhist_start_year = $mkdhist_start_year is out of range"

 # mkdhist_suffix_list is a white space separated list of suffixes
 # of file names to be generated here. Any suffix in this list may be
 # modified by appending a + followed by a comma separated list of
 # numbers (no white space is allowed within this modifier). Each
 # number within the modifier list will correspond to a month (1-12)
 # for which a file with this suffix is to be included. If the
 # modifier exists for a particular suffix then only those months
 # indicated in the modifier will be added to the file list.
 mdump_suffix_list=''
 if [ "x$coupled" = "xon" ]; then
   mkdhist_suffix_list='cm gz gs ss os+12 cs+12 rs+12 ab+12 ob+12 an+12'
 else
   mkdhist_suffix_list='gs ss rs+12 ab+12 an+12'
 fi

 # ---Start_submit_ignore_code----

 # Include a CPP_I sections for testing
##CPP_I_START
##CPP_I_END

   # Generate a list of history files for all months
   mkdhist_curr_year=`echo $mkdhist_start_year|awk '{y=$1-1;printf "%3.3d", y}' -`
   join=0
   while [ $mkdhist_curr_year -lt $mkdhist_end_year ]; do
     mkdhist_curr_year=`echo $mkdhist_curr_year|awk '{y=1+$1;printf "%3.3d", y}' -`
     # Protect the year assignment from cccjob substitution
     eval year\=$mkdhist_curr_year
     if [ $mkdhist_curr_year -eq $mkdhist_start_year ]; then
       mm=`echo $mkdhist_start_mon|awk '{m=$1-1;printf "%2.2d", m}' -`
     else
       mm=0
     fi
     if [ $mkdhist_curr_year -eq $mkdhist_end_year ]; then
       mm_end=$mkdhist_end_mon
     else
       mm_end=12
     fi
     while [ $mm -lt $mm_end ]; do
       mm=`echo $mm|awk '{m=1+$1;printf "%2.2d", m}' -`
       # Protect the mon assignment from cccjob substitution
       eval mon\=$mm

       bname=${mkdhist_prefix}_${mkdhist_curr_year}_m${mm}_

       # mkdhist_name_monthly, if defined, will be used to define
       # the file name (up to the suffix)
       name_monthly=''
       mkdhist_name_monthly=${name_monthly:-$bname}

       # mkdhist_name_monthly is a space separated list of file name templates
       # No file name template in this list may contain whitespace
       for bname in $mkdhist_name_monthly; do
         if [ -n "$mkdhist_suffix_list" ]; then
           # Iterate over the suffixes in mkdhist_suffix_list
           for suffix in $mkdhist_suffix_list; do
             mlist=`echo $suffix|awk -F'+' '{print $2}' -`
             mlist=`echo $mlist|sed 's/,/ /g'`
             if [ -n "$mlist" ]; then
               suffix=`echo $suffix|sed 's/+.*$//'`
               # assume that mlist is a white space separated list of numbers
               # indicating which months to dump
               for xx in $mlist; do
                 if [ $xx -eq $mm ]; then
                   join=`echo $join|awk '{j=1+$1;printf "%d",j}' -`
                   join=`echo $join|sed -e 's/^ *//' -e 's/^0*//'`
                   eval file${join}=$bname$suffix
                 fi
               done
             else
               join=`echo $join|awk '{j=1+$1;printf "%d",j}' -`
               join=`echo $join|sed -e 's/^ *//' -e 's/^0*//'`
               eval file${join}=$bname$suffix
             fi
           done
         else
           # Use the name templates in mkdhist_name_monthly "as is"
           join=`echo $join|awk '{printf "%d",$1+1}' -`
           join=`echo $join|sed -e 's/^ *//' -e 's/^0*//'`
           eval file${join}=$bname
         fi
       done

     done
   done

 # Create a set of dummy files and save them

 n=0
 while [ $n -lt $join ]; do
   n=`expr $n + 1`
   eval curr_fname=\$file$n
   if [ -n "$clone" ]; then
     # Copy the existing file named ${clone}_mXX_YY

     # Ensure that clone does not end with an underscore
     clone_t=`echo $clone|sed 's/_*$//'`
     eval clone\=$clone_t

     # Extract the suffix and month from the current file name
     # These are that last 2 underscore separated fields in the file name
     sfx_fld=`echo $curr_fname|awk '{n=split($0,a,/_/); print a[n]}' -`
     mon_fld=`echo $curr_fname|awk '{n=split($0,a,/_/); n=n-1; print a[n]}' -`

     # Access the existing file, using curr_fname as the local name
     # This access will fail if the (supposedly) existing files doesn't exist
     access $curr_fname ${clone}_${mon_fld}_${sfx_fld}
   else
     # Create a fixed size file filled with zeros

     mkdhist_size=102400
     [ -z "$mkdhist_size" ] && bail "mkdhist_size is null or not set"

     dd if=/dev/zero of=$curr_fname count=$mkdhist_size
   fi
   save $curr_fname $curr_fname
   release $curr_fname
 done

 # ---Stop_submit_ignore_code----

#  * ............................ Condef Parameters ............................

 noprint=on
 nextjob=on

end_of_script

 . endjcl.cdk

#end_of_job
@;
# Short description and visibility flag for the mkdhist job
$JOBDEF{description}{mkdhist} = 'make dummy history files';
$JOBDEF{hide}{mkdhist}        = 'yes';
#******************************************************************************
#*******************           END of mkdhist              ********************
#******************************************************************************

###############################################################################
####################                mkdiag                ####################
###############################################################################
# mkdiag job definition.  Everything between q@ ... @ below is a literal
# bourne-shell job template, emitted verbatim by cccjob; the "comments"
# inside it are part of the runtime job string and must not be edited here.
# NOTE(review): the template references ${stamp} (in tmp_file_list), which
# is assumed to be supplied by the surrounding job environment -- confirm.
$JOBDEF{job}{mkdiag} =
q@#!/bin/sh
#************************************************************************
#**********************  make dummy diagnostic files  **********************
#************************************************************************

 set -a
 . betapath2

 username="acrnxxx"; user="XXX"; crawork="${runid}_job"

 nextjob=on
 noprint=on

 coupled=on

 CCRNTMP=$CCRNTMP
 RUNPATH=$RUNPATH

 jobname=mkdiag; time="600" ; memory="900mb" ;

 . comjcl.cdk

cat > Execute_Script <<'end_of_script'

  # ---Start_submit_ignore_code----
  set -a

  # bail is a simple error exit routine
  bail(){
    echo "mkdiag: "$1
    exit 1
  }

#  * ........................... Parmsub Parameters ............................

  # If clone is defined it must be a file name (without the trailing _mXX_gs)
  # Files with this prefix will be copied to new names defined by the current
  # uxxx, runid and year range. If any of these files do not exist the job
  # will fail.
  mkdiag_clone=''
  eval clone\=$mkdiag_clone

  uxxx=''; runid="job000"

  # These variables are set when the job string is created
  previous_year=NotSet
  previous_month=NotSet

  current_year=NotSet
  current_month=NotSet

  next_year=NotSet
  next_month=NotSet

  run_start_year=NotSet
  run_start_month=NotSet
  run_stop_year=NotSet
  run_stop_month=NotSet

  # The variables mkdiag_prefix_list and mkdiag_suffix_list are strings containing
  # embedded colons and whitespace which are interpreted as list delimiters.
  # These strings may be thought of as 2 dimensional arrays, the rows of these
  # arrays are colon (:) separated strings and each row is divided into columns
  # by separating on white space.
  # These variables are used by make_file_name_list along with runid and year/mon
  # information from the *_year and *_month variables to generate file names.

  # In make_file_name_list the prefix_list and suffix_list strings are first
  # separated into colon delimited lists (rows). There must be a equal number of
  # rows in each of prefix_list and suffix_list because these rows will be
  # used in pairs.
  # Each pair of rows (one row from prefix_list and one row from suffix_list)
  # is separated into a white space separated list. Each element of these
  # white space separated lists is a single prefix or suffix (possibly modified
  # by appending a "+" followed by a comma separated list of integers in the
  # range 1-12). No white space is allowed within a single prefix or suffix.
  # These individual (pre|suf)fixes are then iterated over for each year and
  # month and for each pair of rows in prefix_list and suffix_list to form the
  # desired set of file names, each of which is of the form
  #
  # ${prefix}_${runid}_${year}_m${mon}_${suffix}

  # Any prefix or suffix in these  lists may be modified by appending a +
  # followed by a comma separated list of numbers (no white space is allowed
  # within this modifier). Each number within the modifier list will correspond
  # to a month (1-12) for which a file with this suffix is to be included.
  # If the modifier exists for a particular suffix then only those months
  # indicated in the modifier will be added to the file list.

  # If the above form of file name is inappropriate then the user may
  # provide a template or templates to produce arbitrary file names.
  # These templates are defined in the variable mkdiag_prefix_list.
  # Any individual prefix will be treated as a file name template if it
  # begins with a "%" character. The template will consist of everthing
  # after the "%" character and up to the next colon or white space.
  # It can be composed of anything but must ultimately (after variable
  # substitution, etc) result in a valid file name. When a template
  # is encountered, it is used as the entire file name (ie the "normal" file
  # name form is disregarded as is the corresponding suffix(s)). However, it
  # is subject to the same interation procedure as a normal prefix and
  # does undergo variable substitution. Variables that are defined for
  # substitution include year, mon, runid, uxxx, start_year, start_mon,
  # stop_year, stop_mon, all of the *_year and *_month variables defined
  # above as well as any user supplied variable definitions passed to
  # make_file_name_list as a command line option of the form var=val.

  mkdiag_suffix_list=''
  if [ $coupled = 'on' ]; then
    suffix_list="${mkdiag_suffix_list:-gp xp cp}"
  else
    suffix_list="${mkdiag_suffix_list:-gp xp}"
  fi

  mkdiag_prefix_list=''
  if [ $coupled = 'on' ]; then
    mkdiag_uxxx=dc
    prefix_list="${mkdiag_prefix_list:-$mkdiag_uxxx}"
  else
    mkdiag_uxxx=da
    prefix_list="${mkdiag_prefix_list:-$mkdiag_uxxx}"
  fi

  # Create a file containing a list of file names that may then be
  # "sourced" in the current environment to define the variables
  # file1, file2,..., file$join, join. These variables are used by
  # tdumper to compile the list of files to be archived.
  join=0

  # make_file_name_list uses the variables current_year, current_month,
  # previous_year, previous_month, next_year and next_month to
  # determine start and stop dates for file name creation.
  # It also uses runid, prefix_list and suffix_list from the current
  # environment to build these file names.

  # Allow user supplied command line options for make_file_name_list
  # The following invocation of make_file_name_list will not allow multi-list
  # output so if any command line option is supplied that will turn on
  # multi-list output (e.g. --months_max=... --size_max=... --number_max=..)
  # then this script will abort.
  mkdiag_file_list_opts=''
  fopts="${mkdiag_file_list_opts:-}"

  mkdiag_mon_offset=''
  if [ -n "$mkdiag_mon_offset" ]; then
    # Set a user supplied month offset
    eval fopts=\"$fopts --mon_offset\=$mkdiag_mon_offset\"
  fi

  # Create a temporary file containing the file list
  tmp_file_list="mkdiag_file_list_${runid}_${stamp}"
  make_file_name_list $fopts --nomulti_list $tmp_file_list 2>&1

  [ ! -s "$tmp_file_list" ] && bail "Unable to create file list"

  # A file list was created ...source it
  : ; . $tmp_file_list

  # Delete the file that contains the file list
  rm -f $tmp_file_list

  # At this point file1, file2,... and join are defined in the current
  # environment as well as certain other variables such as start_year,
  # start_mon, stop_year and stop_mon which correspond to the start and
  # stop dates for the file names that were created.
  ym_range="${start_year}m${start_mon}_${stop_year}m${stop_mon}"

  # Define module specific start/stop dates
  mkdiag_start_year=$start_year
  mkdiag_start_mon=$start_mon
  mkdiag_stop_year=$stop_year
  mkdiag_stop_mon=$stop_mon

  # Create a set of dummy files and save them

  n=0
  while [ $n -lt $join ]; do
    n=`expr $n + 1`
    eval curr_fname=\$file$n
    if [ -n "$clone" ]; then
      # Copy the existing file named ${clone}_mXX_YY

      # Ensure that clone does not end with an underscore
      clone_t=`echo $clone|sed 's/_*$//'`
      eval clone\=$clone_t

      # Extract the suffix and month from the current file name
      # These are that last 2 underscore separated fields in the file name
      sfx_fld=`echo $curr_fname|awk '{n=split($0,a,/_/); print a[n]}' -`
      mon_fld=`echo $curr_fname|awk '{n=split($0,a,/_/); n=n-1; print a[n]}' -`

      # Access the existing file, using curr_fname as the local name
      # This access will fail if the (supposedly) existing files doesn't exist
      access $curr_fname ${clone}_${mon_fld}_${sfx_fld}
    else
      # Create a fixed size file filled with zeros

      mkdiag_size=102400
      [ -z "$mkdiag_size" ] && bail "mkdiag_size is null or not set"

      dd if=/dev/zero of=$curr_fname count=$mkdiag_size
    fi
    save $curr_fname $curr_fname
    release $curr_fname
  done

  # ---Stop_submit_ignore_code----

#  * ............................ Condef Parameters ............................

  noprint=on
  nextjob=on

end_of_script

  . endjcl.cdk

#end_of_job
@;
# Short description and visibility flag for the mkdiag job
$JOBDEF{description}{mkdiag} = 'make dummy diagnostic files';
$JOBDEF{hide}{mkdiag}        = 'yes';
#******************************************************************************
#*******************           END of mkdiag              ********************
#******************************************************************************

###############################################################################
# Assign diagnostic deck hashes
###############################################################################

# Define a list of decks to be used in the diagnostic string

# Do not set $diag_deck{rank}, $diag_deck{ocean} or $diag_deck{comment}
# explicitly. Perl will provide a unique reference for this level in these
# hashes the first time $diag_deck{rank}{rank} etc are assigned values.
# Sentinel rank entries; these keys are never real deck names, so give them
# an impossible rank value up front.
$diag_deck{rank}{rank}    = -9999;
$diag_deck{ocean}{rank}   = -9999;
$diag_deck{comment}{rank} = -9999;

# Register every known diagnostic deck: disabled, ranked in list order,
# not ocean-only, and with an empty comment.
my @all_decks = qw(rtdiag35 del modinfo gpint mslpr_eta gpstats spvdqtz spmodinfo
                   sphum spvort spdiv sptemp spintr vdelphi par_def_trac xstats xstats2 constat
                   dust mom_stat eng_stat eng_stat2 eng_stat3 eng_stat4 wat_stat sfc_stat misc_stat
                   cloud8 lmlstat ts_save gppurg sppurg cp_save co_save cleanup);
for my $deck_idx (0 .. $#all_decks) {
  my $deck = $all_decks[$deck_idx];
  $diag_deck{$deck}          = 0;          # deck is off until enabled below
  $diag_deck{rank}{$deck}    = $deck_idx;  # position in the diagnostic string
  $diag_deck{ocean}{$deck}   = 0;          # ocean-only flag, off by default
  $diag_deck{comment}{$deck} = '';
}
# Identify which diagnostic decks are used by default
# Enable the decks that make up the standard diagnostic sequence.
# (lmlstat is deliberately left disabled.)
$diag_deck{$_} = 1 for qw(
  rtdiag35 del modinfo gpint mslpr_eta gpstats
  mom_stat eng_stat4 wat_stat misc_stat cloud8 sfc_stat
  gppurg sppurg cp_save cleanup
);

# Flag the decks that operate on the ocean data only
$diag_deck{ocean}{$_} = 1 for qw(rtdiag35 cp_save co_save);

# Human readable comments for each deck; cccjob inserts these into the
# diagnostic job it builds. Decks absent from this table keep the empty
# comment assigned at initialization. Single quotes are used where a
# literal '$' must survive uninterpolated (e.g. $atmos_file, $trac).
my %deck_comment = (
  rtdiag35  => 'Run time diagnostics for ocean',
  del       => 'Compute delhat and beta',
  modinfo   => 'Add DATA DESCRIPTION section',
  gpint     => 'Create gpinit file',
  mslpr_eta => 'Compute msl pressure from temperature',
  gpstats   => 'Q,T,Z,VORT,U,V and W statistics',
  xstats    => 'Tracer statistics (tracer names are specified in variable $trac)',
  xstats2   => 'Tracer statistics (tracer names are specified in variable $trac)',
  constat   => 'Conservation statistics',
  dust      => 'Dust statistics',
  mom_stat  => 'Momentum related statistics',
  eng_stat2 => 'Energy fluxes and budgets terms',
  eng_stat3 => 'Energy fluxes and budgets terms',
  eng_stat4 => 'Energy fluxes and budgets terms',
  wat_stat  => 'Water fields and balances',
  misc_stat => "Miscellaneous physics fields:
  PBLH : height of planetary boundary layer
   TCV : top of convection
    DR : surface drag coefficients",
  cloud8    => 'Cloud related fields',
  sfc_stat  => 'Basic stats for CLASS fields',
  lmlstat   => 'Lowest model level air statistics',
  ts_save   => 'Accumulate certain time series in $atmos_file',
  gppurg    => 'Purge any gp* files',
  sppurg    => 'Purge any sp* files',
  cp_save   => 'Create cp file for ocean diagnostics',
  co_save   => 'Create co file for ocean diagnostics',
  cleanup   => "Purge temporary files, print zonal cross-section of certain files
 and inventory diagnostic files",
);
$diag_deck{comment}{$_} = $deck_comment{$_} for keys %deck_comment;

###############################################################################
# curr_LIBS is a colon separated list of directories. Here it is only used to
# look for the rcm_PARAMETERS file.
#
# curr_jobdefs is a colon separated list of directories that contain files
# with names of the form *_jobdef. Each *_jobdef file found in any curr_jobdefs
# dir will be read and used to define a job in the JOBDEF hash.
###############################################################################
my $curr_LIBS    = "$main::CCCJOB_LIB";
my $curr_jobdefs = "$main::CCCJOB_LIB/jobdefs";

# A per-user cccjobs dir, when present, goes first on the search path so
# that it takes precedence over the installed library.
my $user_cccjobs_dir = "$ENV{HOME}/cccjobs";
$curr_LIBS = join ':', $user_cccjobs_dir, $curr_LIBS if -d $user_cccjobs_dir;

# Add jobdef files found in the users ~/cccjobs dir to JOBDEF before any others
# This will ensure that the jobs found in the users home dir will always take
# precedence over any other jobs with the same name
# Load every *_jobdef file from the user's ~/cccjobs dir into JOBDEF.
# These are loaded first so the user's jobs always win over any later
# definitions with the same keyword.
if (-d "$ENV{HOME}/cccjobs") {
  my $jobs_dir = "$ENV{HOME}/cccjobs";

  # Read all file names of the form *_jobdef from the current dir
  opendir(my $jobs_dh, $jobs_dir) or die "Unable to open dir $jobs_dir  $!";
  my @job_file_basenames = sort grep { /_jobdef$/ } readdir $jobs_dh;
  closedir($jobs_dh);

  foreach my $job_file_basename ( @job_file_basenames ) {
    my $job_file_name = "$jobs_dir/$job_file_basename";

    # Slurp the file with a lexical handle and 3-arg open (the old
    # bareword/2-arg "<$file" form allows mode characters embedded in
    # file names to change the open mode).
    open(my $job_fh, '<', $job_file_name) or
        die "\n*** ERROR *** Cannot open job file $job_file_name.\n Stopped";
    # Abort if the job file is empty
    die "\n*** ERROR *** Job file $job_file_name is empty.\n Stopped" if -z $job_fh;
    my $job_contents = do { local $/; <$job_fh> };
    close($job_fh);

    # Warn (rather than silently skip) on duplicate keywords from this dir
    my $warn_if_def = 1;
    assign_JOBDEF($jobs_dir, $job_file_basename, $job_file_name, $job_contents, $warn_if_def);
  }
}

# If jobdef files are to be extracted from a git repo then do this after files read
# from the users ~/cccjobs dir but before any files read from curr_jobdefs
# This means files in the git repo take precedence over files in curr_jobdefs
# use_JGIT records whether jobdefs are being pulled from a git repo; the
# jobdef directory scan later in this file tests it to decide whether
# duplicate-keyword warnings should be suppressed.
my $use_JGIT = 0;
if ( $main::JGIT{repo} or $main::JGIT{rev} or $main::JGIT{path} ) {
  my $curr_JGIT_repo = "";
  my $curr_JGIT_mach = "";
  my $curr_JGIT_path = "";
  $use_JGIT = 1;

  # The revision will default to HEAD if not defined by the user
  $main::JGIT{rev} = "HEAD" unless $main::JGIT{rev};

  # JGIT_repo is a directory path name (possibly on a remote machine) that points to a
  # git repository in which lives a dir containing job definition files named *_jobdef
  if ( $main::JGIT{repo} ) {
    # Use files found in the git repository defined in $main::JGIT{repo}

    # Check for the invalid case of JGIT_repo ending with a colon
    die "Invalid --jdef-repo --> $main::JGIT{repo} <--\n  Stopped" if $main::JGIT{repo} =~ /:$/;

    # JGIT->repo must be of the form [rem_mach:]path_to_repo (rem_mach is optional)
    # Verify that this is true
    my ($rem_mach, $path_to_repo);
    my @repo_parts = split(/:/, $main::JGIT{repo});
    if ( scalar(@repo_parts) == 2 ) {
      # Both remote machine and path name were supplied
      $curr_JGIT_mach = $repo_parts[0];
      $curr_JGIT_repo = $repo_parts[1];
      die "Invalid --jdef-repo --> $main::JGIT{repo} <-- missing rem mach.\n  Stopped"
          unless $curr_JGIT_mach;
      die "Invalid --jdef-repo --> $main::JGIT{repo} <-- missing path.\n  Stopped"
          unless $curr_JGIT_repo;
      # Define JGIT->mach and reassign JGIT->repo
      $main::JGIT{repo} = $curr_JGIT_repo;
      $main::JGIT{mach} = $curr_JGIT_mach;
    } elsif ( scalar(@repo_parts) == 1 ) {
      # Only path name part was supplied
      $curr_JGIT_repo = $repo_parts[0];
      die "Invalid --jdef-repo --> $main::JGIT{repo} <-- missing path.\n  Stopped"
          unless $curr_JGIT_repo;
      # A local repo must exist as a directory; a remote one cannot be checked here
      die "--jdef-repo --> $curr_JGIT_repo <-- is not a dir\n Stopped"
          unless -d $curr_JGIT_repo;
    } else {
      # JGIT_repo is not of the form [rem_mach:]path_to_repo
      die "Invalid --jdef-repo --> $main::JGIT{repo} <-- too many colons.\n  Stopped";
    }
  } else {
    # Define a default git repository
    $curr_JGIT_repo = "$ENV{CCRNSRC}/cccjob_dir/cccjob.git";
    die "Default git repo --> $curr_JGIT_repo <-- is not a dir\n  Stopped"
        unless -d $curr_JGIT_repo;
    $main::JGIT{repo} = $curr_JGIT_repo;
  }

  # JGIT_path is a directory path name, relative to the root of the git repository,
  # pointing to a dir containing job definition files named *_jobdef
  if ( $main::JGIT{path} ) {
    # The user has supplied a git repo jobdef dir
    $curr_JGIT_path = $main::JGIT{path};
  } else {
    # Use a default git repo jobdef dir
    $curr_JGIT_path = "lib/jobdefs";
    $main::JGIT{path} = $curr_JGIT_path;
  }
  # Strip any leading "/" from JGIT->path
  # (note: "@" is the s/// delimiter in the next two lines, not an array sigil)
  $curr_JGIT_path =~ s@^/*@@;
  $main::JGIT{path} =~ s@^/*@@;

  # Disabled debug dump of the resolved JGIT settings
  if (0) {
    print "JGIT->mach = $main::JGIT{mach}\n" if $main::JGIT{mach};
    print "JGIT->repo = $main::JGIT{repo}\n" if $main::JGIT{repo};
    print "JGIT->rev  = $main::JGIT{rev}\n"  if $main::JGIT{rev};
    print "JGIT->path = $main::JGIT{path}\n" if $main::JGIT{path};
    print "\n";
  }

  # On certain clusters the ssh comand must be run from a particular node
  # In these cases we need to reroute the ssh command through this node
  # NOTE(review): host prefixes and gateway nodes below are site specific --
  # verify they still match the current cluster configuration.
  use Sys::Hostname;
  my $host = hostname();
  $host =~ s/^(.*?)\..*$/$1/;
  my $ROUTE_SSH = "";
  if ( $host =~ /^ib3/ ) {
    $ROUTE_SSH = "ssh pollux";
  } elsif ( $host =~ /^c1/ ) {
    $ROUTE_SSH = "ssh spica";
  } elsif ( $host =~ /^c2/ ) {
    $ROUTE_SSH = "ssh hadar";
  }

  # Ensure we have git available, if not specify a default path to git
  # Define a command that will return a path to git, if successful
  my $locate_git = "which git";
  if ( $curr_JGIT_mach ) {
    # We need the path to git on a remote machine
    if ( $ROUTE_SSH ) {
      $locate_git = "$ROUTE_SSH \'ssh $curr_JGIT_mach \"$locate_git\"\'";
    } else {
      $locate_git = "ssh $curr_JGIT_mach \"$locate_git\"";
    }
  }

  # Attempt to determine a path to git
  chomp(my $GIT = `$locate_git 2>&1`);
  if ( $? ) {
    # If unable to determine a path to git then hard code it to a known location
    # NOTE(review): hard-coded site-specific git locations -- confirm these
    # installs still exist on the named hosts.
    warn "$GIT\n";
    warn "Unable to determine a path to git. Using a default path.\n";
    if ( $curr_JGIT_mach ) {
      # We need the path to git on a remote machine
      my $rmhost = $curr_JGIT_mach;
      $rmhost =~ s/^(.*?)\..*$/$1/;
      if ( $rmhost =~ /^c1/ or $rmhost =~ /^spica/ ) {
        $GIT = "/users/tor/acrn/rls/local/aix64/bin/git";
      } elsif ( $rmhost =~ /^c2/ or $rmhost =~ /^hadar/ ) {
        $GIT = "/users/tor/acrn/rls/local/aix64/bin/git";
      } elsif ( $rmhost =~ /^ib3/ or $rmhost =~ /^pollux/ ) {
        $GIT = "/usr/bin/git";
      } else {
        # Any other machine must have git available so we should never get here
        die "Unable to determine a location for git.\n  Stopped";
      }
    } else {
      # Set the path according to the current machine name
      $GIT = '';
      if ( $host =~ /^ib3/ ) {
        $GIT = "/usr/bin/git";
      } elsif ( $host =~ /^c1/ ) {
        $GIT = "/users/tor/acrn/rls/local/aix64/bin/git";
      } elsif ( $host =~ /^c2/ ) {
        $GIT = "/users/tor/acrn/rls/local/aix64/bin/git";
      }
    }
  }

  # git is required
  die "git is not available.\n Stopped" unless $GIT;

  # Get a list of all jobdef files found in the git repo
  my $cmd = "cd $curr_JGIT_repo ; $GIT ls-tree -r --name-only $main::JGIT{rev}:$curr_JGIT_path";
  if ( $curr_JGIT_mach ) {
    # The user has supplied a machine name on which the git repo may be found
    if ( $ROUTE_SSH ) {
      $cmd = "$ROUTE_SSH \'ssh $curr_JGIT_mach \"$cmd\"\'";
    } else {
      $cmd = "ssh $curr_JGIT_mach \"$cmd\"";
    }
  }
  chomp(my @sh_out = `$cmd 2>&1`);
  my $sh_err = $?;
  if ( $sh_err ) {
    foreach (@sh_out) {print "$_\n"}
    die "** EE **  Problem executing\n   $cmd\n";
  }

  # Populate JOBDEF with all jobdefs found in the git repo
  foreach (@sh_out) {
    # Ignore any files with names not of the form *_jobdef
    next unless /_jobdef$/;
    # Extract each jobdef's content with git cat-file (remotely when needed)
    my $cmd = "cd $curr_JGIT_repo ; $GIT cat-file blob $main::JGIT{rev}:${curr_JGIT_path}/$_";
    if ( $curr_JGIT_mach ) {
      # The user has supplied a machine name on which the git repo may be found
      if ( $ROUTE_SSH ) {
        $cmd = "$ROUTE_SSH \'ssh $curr_JGIT_mach \"$cmd\"\'";
      } else {
        $cmd = "ssh $curr_JGIT_mach \"$cmd\"";
      }
      # Progress marker, one dot per remote fetch
      print ".";
    }
    my $job_contents = `$cmd 2>&1`;
    if ( $? ) {
      print "\n" if $curr_JGIT_mach;
      print "$job_contents\n";
      die "** EE **  Problem executing\n   $cmd\n";
    }
    my $jobs_dir          = ${curr_JGIT_path};
    my $job_file_basename = $_;
    my $job_file_name     = "${curr_JGIT_repo} + $jobs_dir/$job_file_basename";
    assign_JOBDEF($jobs_dir, $job_file_basename, $job_file_name, $job_contents);
  }
  print "\n" if $curr_JGIT_mach;
}

# Read jobs from files in any $curr_jobdefs directory.
# Each file of the form *_jobdef is read and the contents of that file are
# assigned to the JOBDEF hash and associated with a keyword that is read from
# the file itself or derived from the file name. If the file contains a line
# of the form /#[#\s]*\w*\s*description\s*::.*/ then everything after the ::
# on that line is used as the job description.
# Load *_jobdef files from every directory on the curr_jobdefs path.
# assign_JOBDEF derives each job's keyword and description and ignores
# keywords that were already defined by earlier (higher-precedence) sources.
foreach my $jobs_dir (split ':',$curr_jobdefs) {
  # Ignore anything that is not a directory
  next unless -d $jobs_dir;

  # Read all file names of the form *_jobdef from the current dir
  opendir(my $jobs_dh, $jobs_dir) or die "Unable to open dir $jobs_dir  $!";
  my @job_file_basenames = sort grep { /_jobdef$/ } readdir $jobs_dh;
  closedir($jobs_dh);

  foreach my $job_file_basename ( @job_file_basenames ) {
    my $job_file_name = "$jobs_dir/$job_file_basename";

    # Slurp the file with a lexical handle and 3-arg open (the old
    # bareword/2-arg "<$file" form allows mode characters embedded in
    # file names to change the open mode).
    open(my $job_fh, '<', $job_file_name) or
        die "\n*** ERROR *** Cannot open job file $job_file_name.\n Stopped";
    # Abort if the job file is empty
    die "\n*** ERROR *** Job file $job_file_name is empty.\n Stopped" if -z $job_fh;
    my $job_contents = do { local $/; <$job_fh> };
    close($job_fh);

    # Suppress duplicate-keyword warnings when a git repo supplied jobdefs,
    # since repo definitions intentionally shadow these ones
    my $warn_if_def = 1;
    $warn_if_def = 0 if $use_JGIT;
    assign_JOBDEF($jobs_dir, $job_file_basename, $job_file_name, $job_contents, $warn_if_def);
  }
}

# Register a single job definition in the package-global JOBDEF hash.
# Arguments: directory the file came from, file basename, full (display)
# file name, the job string itself, and a flag controlling whether a
# duplicate keyword produces a warning.
sub assign_JOBDEF {
  my ($jobs_dir, $job_file_basename, $job_file_name, $job_contents, $warn_if_defined) = @_;

  # Determine the keyword for this job: prefer an explicit
  # "# keyword :: name" directive embedded in the file, otherwise fall
  # back to the file name minus its trailing "_jobdef" extension.
  my ($job_name) = $job_contents =~ /^\s*#[#\s]*keyword\s*::\s*(\w+)/m;
  ($job_name) = $job_file_basename =~ /^(.*)_jobdef$/ unless $job_name;
  die "Unable to determine a keyword for file $job_file_name\n Stopped" unless $job_name;

  # The keyword directive is internal bookkeeping; blank it out of the job text
  $job_contents =~ s/^\s*#[#\s]*keyword\s*::.*$/#/m;

  # The first definition of a keyword wins; an existing entry is never
  # overwritten (~/cccjobs beats a git repo, which beats CCCJOB_LIB/jobdefs)
  if (defined $JOBDEF{job}{$job_name}) {
    if ($warn_if_defined) {
      warn "*** WARNING *** The keyword $job_name is already in use
              File $job_file_name is ignored\n";
    }
    return;
  }

  # Record the job string and where it came from
  $JOBDEF{job}{$job_name}   = $job_contents;
  $JOBDEF{path}{$job_name}  = $job_file_name;
  $JOBDEF{clone}{$job_name} = 0;   # this entry is an original, not a clone

  # Jobs that came from ~/cccjobs are flagged as user supplied
  $JOBDEF{user_supplied}{$job_name} =
      ("$jobs_dir" eq "$ENV{HOME}/cccjobs") ? 1 : 0;

  # Pull an optional one-line "description ::" directive out of the job text
  my ($desc) = $JOBDEF{job}{$job_name} =~ /^\s*#[#\s]*\w*\s*description\s*::\s*(.*)$/m;
  $JOBDEF{description}{$job_name} = $desc ? $desc : '';

  # Pull an optional "hide ::" directive out of the job text
  my ($hide) = $JOBDEF{job}{$job_name} =~ /^\s*#[#\s]*hide\s*::\s*(\w+)/m;
  $JOBDEF{hide}{$job_name} = $hide ? $hide : '';

  # Blank the hide directive out of the stored job string
  $job_contents =~ s/^\s*#[#\s]*hide\s*::.*$/#/m;
  $JOBDEF{job}{$job_name} = $job_contents;
}

# Disabled (if 0) debug aid: dumps every registered job name with the file
# it was loaded from, then the total job count. Flip the condition to 1
# to enable while debugging jobdef loading.
if (0) {
  # href will point to a hash whose keys are job names
  my $href = $JOBDEF{path};
  foreach (sort keys %{$href}) {
    printf "%20s :: %s\n", $_,$JOBDEF{path}{$_};
  }
  print "Total number of jobs is ",scalar(keys %{$href}),"\n";
}

###############################################################################
# Define RCM_PARAMS
# Look for a file named rcm_PARAMETERS on curr_LIBS dirs and assign RCM_PARAMS
# with the contents of this file.
###############################################################################
# Accumulate the contents of every readable rcm_PARAMETERS file found on
# the curr_LIBS path into $RCM_PARAMS (exported). Dies on an empty or
# unopenable parameters file. Uses a lexical handle and 3-arg open; the
# previous bareword/2-arg form was vulnerable to mode injection.
$RCM_PARAMS = '';
foreach my $jd (split ':',$curr_LIBS) {
  # Skip path entries that are not directories
  next unless -d $jd;
  my $param_file = "$jd/rcm_PARAMETERS";
  next unless -r $param_file;
  open(my $param_fh, '<', $param_file)
      or die "\n*** ERROR *** Cannot open RCM parameters file $param_file.\n Stopped";
  # Abort if the parameters file is empty
  die "\n*** ERROR *** RCM parameters file $param_file is empty.\n Stopped"
      if -z $param_fh;
  # Slurp the whole file and append it to the accumulated parameters
  $RCM_PARAMS .= do { local $/; <$param_fh> };
  close($param_fh);
}

# Ensure a true value is returned
1;
