#!/bin/sh
# NAME:         autoDaily
# PURPOSE:      This is the wrapper script to handle parts of the daily workflow:
#		- the PREprocessing workflow for new data available in NGAS
#		- the PROcessing workflow for CALIB data (execAB and execQC)
#		- the final update of affected HC reports
# AUTHOR:       Reinhard Hanuschik
# DATE:         January 2006
# VERSION:      1.0 -- created from scratch (January 2006)
#		1.0.1- improved test for other sessions running (2006-02-07)
#		1.0.2- improved logging (2006-02-16)
#		1.0.3- made compatible with createAB v2.x (2006-03-14)
#		1.0.4- include checkConsist (2006-06-01)
#		1.0.5- checks for measureQ disabled (2006-10-06)
#		1.0.6- check for NotValid enabled (2006-10-30)
#		1.1 -- improved check for autoDaily running; plugin PGI_PREPROC supported; new option -C to jump into autoDaily at AB creation (2006-12-04)
#		1.1.1- dvdMonitor replaced by ngasMonitor (2007-01-10)
#		1.2 -- has configurable download mechanism (PROCESSAB vs. DATACLIENT) (2008-01-22)
#		1.2.1- improved handling of completeness flag (2008-01-30)
#		1.2.2- download mechanism enforced to be PROCESSAB (2008-04-10)
#		1.3 -- CURRENT_SKIP introduced (2008-07-29) [BW]
#		1.4 -- ENABLE_INCREM introduced, for incremental processing; new option -D (2008-09-16)
#		1.4.1- calls createJob -F --> getStatusAB exports links; always creates AB monitor (2009-03-25)
#   		1.4.2- enforce calling at least CURRENT date (unless CURRENT_SKIP=YES) (2009-05-11)
#		2.0 -- turned into master tool: calling qc1Parser and ngasClient; new key FLASHBACK; dataclient removed (2009-07-21)
#		2.0.1- bug fixed with workflow for "no new data found"; PARSER_SKIP_AVAIL: supports qc1Parser -n (2010-01-22)
#		2.0.2- line 959: simplify and support even non-standard trendPlotter calls with more than 3 parameters (2010-04-26)
#		2.0.3- simplified handling if no new jobs found (2010-05-03)
#		2.0.4- execution time written to QC1 database "exec_time" [MNe]; PARSER_SKIP_AVAIL prepared for decommissioning (2010-11-29)
#		2.0.5- bug fixed with exec_time; PARSER_SKIP_AVAIL removed (2010-12-20)
#		2.0.6- cleaned up for PARSER_SKIP_AVAIL (2011-01-14)
#		2.0.7- process_load added to QC1 database table exec_time (2011-03-16)
#		2.0.8- call dfoMonitor in background in exitProcs (2012-09-13)
#		2.0.9- exitProcs gets flag (timeout|other|finished) (2012-09-19)
#		2.1 -- removed: createAB -z; obsolete: CURRENT_SKIP, ENABLE_INCR, AUTO_EXEC, MAIL_NOTIF; new option -r $RESOURCE (for migration) (2013-02-15)
#		2.1.1- include call of ngasClient -U to update NGAS download servers (2013-11-29)
#		2.1.2- PATH edited for SL63 (2014-03-10)
#		2.2 -- removed: checkConsist (2014-04-28)
#		2.3 -- removed: qc1Parser, reference to fastcache (2014-10-31) [BWo]
#		2.4 -- new option -F[orced]; option -C dropped (EXEC_MODE=JUMP) [RHa]; hdr download for date-1 if 21 or 22; CHECK_OTHER improved [BWo] (2015-06-30)
#		2.4.1- bug fix for QCBQS_TYPE=PAR (2015-07-27)
#		2.4.2- delete log files older than 2 years; recognize and kill 'createAB -r' session (2015-09-02)
#		2.5 -- provides the list_calBadQual.dat for the calChecker (v4.6) badQual interface; RESOURCE removed (2015-10-26)
#		2.5.1- always download hdrs, not only in forced mode (2015-11-09)
#		2.5.2- stabilized against database error: suppress 'Datab' error coming from temporary unavailability of database (2016-02-23)
#		2.5.3- source .bashrc instead of .qcrc and .dfosrc (2018-05-17, BWo)
#		2.5.4- sourcing and checks for qcrc and dfosrc removed (2018-06-16)
#		2.5.5- call of ngasClient -U removed (2024-03-06)
#
# PARAMETERS:	-h -v [-D] [-F]
# TOOLS CALLED:	as a workflow tool, autoDaily calls ngasMonitor, createAB, 
#		createJob; createReport to refresh the daily report; processAB and ngasClient
# CONFIG:       config.autoDaily plus all config files of wrapped tools, 
#		plus config.calChecker for $PSO_EMAIL, plus config.processQC for $QCBQS_TYPE
# OUTPUT:	CALIB ABs, JOB_FILE_AUTO; if configured, the job file is executed and delivers
#		pipeline products plus QC reports
#		
# COMMENTS:     This is the DFOS tool for automatic creation and execution of CALIB ABs
#		for new data. It is designed to run as cronjob but can also be invoked from the command line.
# =============================================================================
TOOL_VERSION="2.5.5"
TOOL_NAME="autoDaily"

# for createReport wrapper (documented under createWrapper)
SLEEP=10        #check time: every SLEEP sec we check again up to $SLEEP_MAX
SLEEP_MAX=200   #max waiting time: if createReport takes longer than that, we kill it

# =============================================================================
# 0.  Preparation
# 0.1 initialize environment (tool to be run as cronjob)
# =============================================================================

# BWo, 2018-05-17 (needed for calling in cronjob environment which is the normal case)
source $HOME/.bashrc

# set other environment variables (required for isql)
export SYBASE=/opt/sybase
#export PATH=/opsw/util/python/bin:${PATH}:/opt/sybase/bin:/opt/msql2/bin
#for SL63:
#export PATH=/opsw/util/python/bin:${PATH}:/opt/sybase/bin:/opt/msql2/bin/:/opt/python/bin/python

# URLs for the QC web pages; evaluated AFTER sourcing ~/.bashrc so that
# $DFO_INSTRUMENT is defined also in a bare cron environment
# (previously these were expanded before the source and could come out empty)
IMG_URL="http://www.eso.org/qc/ALL/img"
HC_URL="http://www.eso.org/qc/${DFO_INSTRUMENT}/reports"
QCWEB_URL="http://qcweb.hq.eso.org/${DFO_INSTRUMENT}/logs"

# upload of logfile
WEB_DIR="/home/qc/qc/${DFO_INSTRUMENT}/reports"         # on $DFO_WEB_SERVER

# =============================================================================
# 0.2 Check for DFO variables
# =============================================================================

# sanity check: the DFO_* environment must have been provided by ~/.bashrc
CHECK=`printenv | grep DFO`

if [ "Q$CHECK" = "Q" ]
then
        echo "*** ERROR: DFO variables not defined. Check ~/.dfosrc and restart."
        exit -1
fi

TITLE_COLOR="#006A9D"	#eso-blue

# =============================================================================
# 0.3 get options
# =============================================================================

# EXEC_MODE: FULL = query NGAS for new dates; NO_NGAS = use the user-edited
# $DFO_MON_DIR/list_data_dates (option -D)
EXEC_MODE=FULL
FORCED_MODE=NO	#hidden option, for php interface

# -v: print version; -h: show help file; -D: manual date list (NO_NGAS mode);
# -F: forced mode, triggered by the hcForcedRefresh.php web interface
while getopts DFhv OPTION
do
        case "$OPTION" in
         v ) echo "$TOOL_VERSION"
             exit 0 ;;
         h ) cat $DFO_DOC_DIR/autoDaily.h | more
             exit 0 ;;
	 D ) echo "Edit \$DFO_MON_DIR/list_data_dates with the date(s) to process; then hit return to continue:"
		read input 
		EXEC_MODE=NO_NGAS ;;
	 F ) FORCED_MODE=YES ;;
	 * ) echo "Unknown option, check tool help."; exit ;;
        esac
done

# make sure HOSTNAME reflects the configured dfo machine (cron env may differ)
export HOSTNAME=$DFO_MACHINE

# =============================================================================
# 0.4 read config info
# =============================================================================

CONFIG=$DFO_CONFIG_DIR/OCA

# NOTE(review): pattern 'USER' is unanchored -- it matches any config line
# containing the substring USER; presumably the key is unique -- TODO confirm
export USER=`grep USER			$DFO_CONFIG_DIR/config.autoDaily | awk '{print $2}'`

# data disk and its fill threshold (percent, '%' stripped) for section 1.2
DISK=`grep "^DATA_DISK"                 $DFO_CONFIG_DIR/config.dfoMonitor | awk '{print $2}'`
DISK_SPACE=`grep "^DISK_SPACE" 		$DFO_CONFIG_DIR/config.autoDaily | awk '{print $2}' | sed "s/\%//"`

# PSO contact address, displayed in the forced-refresh php form
PSO_EMAIL=`grep "^PSO_EMAIL"		$DFO_CONFIG_DIR/CALCHECK/config.calChecker | awk '{print $2}'`

# timeout for waiting for other processes
OTHER_TIMEOUT=`grep "^OTHER_TIMEOUT" 	$DFO_CONFIG_DIR/config.autoDaily | awk '{print $2}'`
if [ "Q$OTHER_TIMEOUT" = "Q" ]
then
	OTHER_TIMEOUT=50 #50 minutes, since after 1 hour the next process may be triggered
fi

# cap at 50 minutes: the next cron instance may start after one hour
if [ $OTHER_TIMEOUT -gt 50 ]
then
        OTHER_TIMEOUT=50 #50 minutes, since after 1 hour the next process may be triggered
	echo "***INFO: \$OTHER_TIMEOUT set to 50 [minutes], since next autoDaily to be triggered in 1 hour."
fi

OTHER_TIMEOUT1=`echo $OTHER_TIMEOUT | awk '{print $1*60}'` # convert to seconds

# createAB config is mandatory for the workflow
if [ ! -s $CONFIG/config.createAB ]
then
	echo "***ERROR: $CONFIG/config.createAB not existing. Exit."
	exit -1
fi

JOB_FILE_NAME="JOBS_NIGHT"
JOB_FILE_AUTO="JOBS_AUTO"

# optional preprocessing plugin; must exist under $DFO_BIN_DIR to be used
PGI_PREPROC=`grep "^PGI_PREPROC"	$DFO_CONFIG_DIR/config.autoDaily | awk '{print $2}'`
if [ "Q$PGI_PREPROC" = "Q" ]
then
	PGI_PREPROC=NONE
else
	if [ ! -s $DFO_BIN_DIR/$PGI_PREPROC ]
	then
		echo "*** WARNING: \$PGI_PREPROC defined as $PGI_PREPROC but not found under \$DFO_BIN_DIR. Will be ignored."
		echo ""
		PGI_PREPROC=NONE
	fi
fi

# flashback handling in incremental mode
# FLASHBACK=YES: also process dates older than yesterday; default NO
FLASHBACK=`grep ^FLASHBACK 	$DFO_CONFIG_DIR/config.autoDaily | awk '{print $2}'`
if [ Q$FLASHBACK != QYES ]
then
	FLASHBACK=NO
fi

# update NGAS servers (removed with 2.5.5, not needed after de-commissioning of ngamsCClient)
#ngasClient -U

# =============================================================================
# 0.5 procedures 
# 0.5.1 getLastStatus
# =============================================================================

getLastStatus(){
# Determine the latest workflow status for date $D from $DFO_MON_DIR/DFO_STATUS.
# Sets: LAST_STATUS - first column of the newest matching line ('NONE' if no match)
#       REQ_ID      - fourth column of that same line (empty if no match)
REQ_ID=$(grep "$D" $DFO_MON_DIR/DFO_STATUS | tail -1 | awk '{print $4}')
LAST_STATUS=$(grep "$D" $DFO_MON_DIR/DFO_STATUS | tail -1 | awk '{print $1}')
if [ -z "$LAST_STATUS" ]
then
	LAST_STATUS=NONE
fi
}

# =============================================================================
# 0.5.2 exitProcs
# $1: flag ('timeout' or 'other'; default: 'finished')
# =============================================================================

exitProcs(){
# Finalize a run. $1 selects the closing status FLAG: empty -> 'finished';
# 'terminated' additionally rewrites the forced-mode progress page with a
# warning icon and clears the HC_ENFORCE lock; anything else (e.g. 'timeout')
# is used verbatim. Then the dfoMonitor is refreshed in the background, the
# closing log line is written, and the session log (minus HTML markup) is
# appended to the permanent daily log.
case "Q$1" in
 "Q" )
	FLAG=finished ;;
 "Qterminated" )
	FLAG="$1 since automatic processing of $DFO_INSTRUMENT data on $HOSTNAME is already going on.<br>                 	Please wait for the results of that automatic processing"
	echo "<html> <META HTTP-EQUIV=\"Refresh\" CONTENT=\"10\">  <META HTTP-EQUIV=\"Cache-Control\" CONTENT=\"NO-CACHE\"> <pre>" > $LOG_FILE
	echo "<img src=/observing/dfo/quality/ALL/img/php-warning.png height=30 width=30>" >> $LOG_FILE
	rm -f $TMP_DIR/HC_ENFORCE.lock ;;
 * )
	FLAG=$1 ;;
esac

# suppress useless error messages	
dfoMonitor -a -q 2>/dev/null & 

case $FORCED_MODE in
 "YES" )
	writeLog "Forced processing ${FLAG}."
	echo "</pre> </html>" >> $LOG_FILE
	scpLog ;;
 * )
	writeLog "autoDaily ${FLAG}" ;;
esac

# archive the session log, stripping pure-HTML lines
grep -v "^<" $LOG_FILE >> $FINAL_LOG
rm -f $TMP_DIR/message
}

# =========================================================================
# 0.5.3 writeLog
# =========================================================================
writeLog(){
# Append "$1" to $LOG_FILE, prefixed with the current UT timestamp and a tab.
TIMESTAMP=$(date -u +"%Y-%m-%d %H:%M:%S")
printf '%s\t%s\n' "$TIMESTAMP" "$1" >> $LOG_FILE
}

# =========================================================================
# 0.5.4 scpLog: for FORCED_MODE, scp log
# =========================================================================
scpLog(){
# Publish the current session log as autolog.html on the web server, so the
# forced-mode php page can display live progress; transfer errors are ignored.
chmod a+w $LOG_FILE
scp -p $LOG_FILE ${DFO_WEB_SERVER}:${WEB_DIR}/php/autolog.html >/dev/null 2>&1
}

# =========================================================================
# 0.6 createWrapper: create a wrapper around createReport 
#     (taken from calChecker)
# We create a wrapper around createReport and call it in the background, to protect
# ourselves against database connection issues in particular with the PANL database
# on Paranal.
# There is a timeout parameter which skips createReport in case of troubles.
# SLEEP: basic beat for checking of activity
# SLEEP_MAX: timeout after which job is killed (interpreted as being hanging)
# We loop over SLEEP to minimize waiting time for successful execution.
# $1: passed $DATE
# =========================================================================
createWrapper(){
# Generate and run $TMP_DIR/wrappedCreateReport, a throw-away script that
# calls 'dfosCron -t HdrDownloader' and then 'createReport -d $1' in the
# background, polling every $SLEEP seconds and killing either step after
# $SLEEP_MAX seconds (with a notification mail to $OP_ADDRESS).
# $1: date to process.
# The heredoc is unquoted: $SLEEP/$SLEEP_MAX/$TMP_DIR/$OP_ADDRESS are expanded
# now; \$-escaped variables are evaluated when the wrapper runs.
# NOTE(review): 'ps \$PID | awk {print \$6}' tests a field inside the COMMAND
# column to detect liveness -- presumably relies on the known argument count
# of the wrapped tools; TODO confirm.
cat > $TMP_DIR/wrappedCreateReport <<EOT
#!/bin/sh
dfosCron -t HdrDownloader &

PID=\$!
ALREADY_SLEPT=0
sleep 3

while [ \$ALREADY_SLEPT -lt $SLEEP_MAX ]
do
        CHECK_PID=\`ps \$PID | grep -v PID  | awk '{print \$6}'\`
        if [ Q\${CHECK_PID} != Q ]
        then
                ALREADY_SLEPT=\`echo \$ALREADY_SLEPT $SLEEP | awk '{print \$1+\$2}'\`
                sleep $SLEEP
        else
                break
        fi
done

CHECK_PID=\`ps \$PID | grep -v PID | awk '{print \$6}'\`
if [ Q\${CHECK_PID} != Q ]
then
        kill -9 \$PID 1>/dev/null 2>&1
	TIMESTAMP=\`date +%Y-%m-%d" "%H:%M:%S\`

	echo "[\$TIMESTAMP] 	autoDaily: time out in HdrDownloader." > $TMP_DIR/auto_mail
	mail -s "autoDaily timed out in HdrDownloader." $OP_ADDRESS < $TMP_DIR/auto_mail
        echo "TIMEOUT: HdrDownloader killed after $SLEEP_MAX secs."
fi

createReport -d \$1 &
PID=\$!
ALREADY_SLEPT=0

while [ \$ALREADY_SLEPT -lt $SLEEP_MAX ]
do
        CHECK_PID=\`ps \$PID | grep -v PID  | awk '{print \$6}'\`
        if [ Q\${CHECK_PID} != Q ]
        then
                ALREADY_SLEPT=\`echo \$ALREADY_SLEPT $SLEEP | awk '{print \$1+\$2}'\`
                sleep $SLEEP
        else
                break
        fi
done

CHECK_PID=\`ps \$PID | grep -v PID | awk '{print \$6}'\`
if [ Q\${CHECK_PID} != Q ]
then
        kill -9 \$PID 1>/dev/null 2>&1
	TIMESTAMP=\`date +%Y-%m-%d" "%H:%M:%S\`

	echo "[\$TIMESTAMP] 	autoDaily: time out in createReport -d \$1." > $TMP_DIR/auto_mail
	mail -s "autoDaily timed out in createReport -d \$1" $OP_ADDRESS < $TMP_DIR/auto_mail
        echo "TIMEOUT: createReport killed after $SLEEP_MAX secs."
fi
EOT
chmod a+x $TMP_DIR/wrappedCreateReport
# bug fix applied above: the kill redirections were '2>&1 1>/dev/null', which
# sent kill's stderr to the terminal/log instead of /dev/null
$TMP_DIR/wrappedCreateReport $1
}

# =============================================================================
# 0.7 procedure quickUpdate: quick update of single in dfoMonitor marked by 
#     <!--AUTOMESSAGE-->
# =============================================================================
quickUpdate(){
# Splice the current status text from $TMP_DIR/message into the line of
# dfoMonitor.html tagged <!--AUTOMESSAGE--> (marker is preserved), then
# trigger a browser refresh if $DFO_BROWSER is running for $USER.
UPDATE=$(cat $TMP_DIR/message)
sed -i -e "/<!--AUTOMESSAGE-->/s|^.*|<td COLSPAN=3><font color=#FFFFFF size=2>${UPDATE}</td> <!--AUTOMESSAGE-->|" $DFO_MON_DIR/dfoMonitor.html
BROWSER_ACTIVE=$(ps -wfC $DFO_BROWSER | grep "$USER " | grep -v CMD)
if [ -n "$BROWSER_ACTIVE" ]
then
	$DFO_GUI_DIR/refresh_browser $DFO_MON_DIR/dfoMonitor.html
fi
}

# =============================================================================
# 0.8 Action symbols for dfoMonitor
# =============================================================================

# tiny img tags concatenated into $TMP_DIR/message to visualize workflow
# progress on the dfoMonitor page (rendered via quickUpdate); the title
# attributes mark the states executing / done / waiting
ACTION="<img src=$IMG_URL/sq1.png width=5 height=5 border=0 title=executing>"
DONE="<img src=$IMG_URL/sq9.png width=5 height=5 border=0 title=done>"
PENDING="<img src=$IMG_URL/sqg.png width=5 height=5 border=0 title=waiting>"

# =============================================================================
# 0.9 checkOtherAutoDaily: procedure to check for previous unfinished autoDaily
# =============================================================================

checkOtherAutoDaily(){
# Guard against concurrent autoDaily sessions for $USER.
# Forced mode: if another session is already running, mail a notification,
#   rewrite the progress page via 'exitProcs terminated' and exit 10.
# Normal mode: poll once per minute until the other session finishes;
#   give up after $OTHER_TIMEOUT1 seconds, or immediately when more than
#   2 instances remain after the 60s grace period (deadlock protection).
# NOTE(review): the '-gt 2' threshold presumably accounts for this process
# itself plus the subshell of the ps pipeline in the listing -- TODO confirm.
if [ $FORCED_MODE = YES ]
then
# in forced mode, we don't wait for the other/earlier instance but exit
	CHECK_OTHER=`ps -wfC autoDaily | grep -v CMD | grep $USER | wc -l`
	if [ $CHECK_OTHER -gt 2 ]
	then
		TIMESTAMP=`date -u +%Y-%m-%d" "%H:%M:%S`
		echo "$TIMESTAMP ***INFO: forced processing terminated since other job already started for `whoami`@`hostname`." > $TMP_DIR/auto_mail
		echo "           Please wait for results." >> $TMP_DIR/auto_mail
		mail -s "Forced autoDaily terminated since other job already running" $OP_ADDRESS < $TMP_DIR/auto_mail
		rm -f $TMP_DIR/HC_ENFORCE.lock
		exitProcs terminated
		exit 10
	fi
fi

CHECK_OTHER=`ps -wfC autoDaily | grep -v CMD | grep $USER | wc -l`
TIME=0

while [ $CHECK_OTHER -gt 2 ]
do
# re-count at loop entry so the log line shows the current number
	CHECK_OTHER=`ps -wfC autoDaily | grep -v CMD | grep $USER | wc -l` 
	writeLog "waiting for another autoDaily process ($CHECK_OTHER) to finish (check again after 60 sec) ..."
	if [ $FORCED_MODE = YES ]
	then
		scpLog
	fi
	sleep 60

# still more than 2 instances --> exit (to avoid a deadlock)
	CHECK_OTHER=`ps -wfC autoDaily | grep -v CMD | grep $USER | wc -l` 
	if [ $CHECK_OTHER -gt 2 ]
	then
		TIMESTAMP=`date  +%Y-%m-%d" "%H:%M:%S`
		echo "[$TIMESTAMP] ***ERROR: more than 2 autoDaily processes found. Exit." >> $LOG_FILE
		echo "[$TIMESTAMP]	autoDaily: more than 2 instances found." > $TMP_DIR/auto_mail
		mail -s "autoDaily timed out while waiting for another autoDaily process" $OP_ADDRESS < $TMP_DIR/auto_mail
		exitProcs timeout
		exit
	fi

# accumulate waiting time in seconds towards the configured timeout
	TIME=`echo $TIME 60 | awk '{print $1+$2}'`
	
	if [ $TIME -ge $OTHER_TIMEOUT1 ]
	then
		TIMESTAMP=`date  +%Y-%m-%d" "%H:%M:%S`
		echo "[$TIMESTAMP] ***ERROR: Timeout upon waiting for another autoDaily process." >> $LOG_FILE
		echo "[$TIMESTAMP] 	autoDaily: timed out upon waiting for $OTHER_TIMEOUT minutes for another autoDaily.
No operation done." > $TMP_DIR/auto_mail
		mail -s "autoDaily timed out while waiting for another autoDaily process" $OP_ADDRESS < $TMP_DIR/auto_mail
		exitProcs timeout
		exit
	fi
done
}

# =============================================================================
# 0.10 calc_exectime: compute execution time and write TEXEC (in minutes) 
#      to the QC1 database "exec_time" [MNe] 
# =============================================================================

calc_exectime(){
# Compute the wall-clock duration TEXEC (minutes) of this run from the
# 'started'/'finished' lines in $LOG_FILE and ingest it into the QC1
# database table 'exec_time' (qc1Ingest), together with the current
# fractional MJD, tool version, hostname and process load.

# determine START_TIME, END_TIME
# timestamps look like 'YYYY-MM-DD HH:MM:SS'; the sed calls strip a trailing
# colon and any surrounding brackets from older log formats
START_TIME=`grep "autoDaily started" $LOG_FILE | awk '{print $1,$2}' | sed "s/:$//" | sed "s/\[//" | sed "s/\]//"`
if [ "Q$START_TIME" = Q ]
then
	START_TIME=`grep "Forced processing started" $LOG_FILE | awk '{print $1,$2}' | sed "s/:$//" | sed "s/\[//" | sed "s/\]//"`
fi

END_TIME=`grep "autoDaily finished" $LOG_FILE | awk '{print $1,$2}' | sed "s/:$//" | sed "s/\[//" | sed "s/\]//"`
if [ "Q$END_TIME" = Q ]
then
	END_TIME=`grep "Forced processing finished" $LOG_FILE | awk '{print $1,$2}' | sed "s/:$//" | sed "s/\[//" | sed "s/\]//"`
fi

if [ "Q$START_TIME" = "Q" ] || [ "Q$END_TIME" = "Q" ]
then
	TEXEC=0
else
# convert 'YYYY-MM-DD HH:MM:SS' into minutes-since-midnight plus a YYYYMMDD date
        START_MIN=`echo $START_TIME | sed "s/:/ /g" | sed "s/-//g" | awk '{print $2*60.+$3+$4/60.}'`
        DATE1=`echo $START_TIME | sed "s/-//g" | awk '{print $1}'`

        END_MIN=`echo $END_TIME | sed "s/:/ /g" | sed "s/-//g" | awk '{print $2*60.+$3+$4/60.}'`
        DATE2=`echo $END_TIME | sed "s/-//g" | awk '{print $1}'`

        if [ $DATE1 = $DATE2 ]
        then
                TEXEC=`echo $END_MIN $START_MIN | awk '{print $1-$2}'`
        else
# run crossed midnight; the +1440 correction assumes a duration below 24 hours
                TEXEC=`echo $END_MIN $START_MIN | awk '{print $1+1440-$2}'`
        fi
fi

# NOTE(review): when neither timestamp was found, DATE1 is unset here and
# CIVIL_DATE/LOCAL_TIME are ingested empty -- TODO confirm this is intended
CIVIL_DATE=$DATE1
LOCAL_TIME=`echo $START_TIME | awk '{print $2}'`

# compute the current MJD as is needed for the exec_time QC1 table:
TODAY=`date -u +%Y-%m-%d`
TODAY_MJD=`qcdate $TODAY`

TODAY_HH=`date -u +%H`
TODAY_MM=`date -u +%M`
TODAY_SS=`date -u +%S`
# fractional MJD: integer MJD plus the UT time of day as a day fraction
TODAY_MJD1=`echo $TODAY_MJD $TODAY_HH $TODAY_MM $TODAY_SS | awk '{printf"%12.6f\n", $1+($2+$3/60.+$4/3600.)/24.}'`

# keep a local record of the ingestion command for trending/debugging
echo "qc1Ingest -table exec_time -civil_date $CIVIL_DATE -local_time $LOCAL_TIME -mjd_obs $TODAY_MJD1 -exec_time $TEXEC -version $TOOL_VERSION -instrument $DFO_INSTRUMENT -hostname $DFO_MACHINE -tool $TOOL_NAME -process_load $PROCESS_LOAD " >> $DFO_TREND_DIR/dfo_exec_time

# ingest; reduce the tool chatter to a single summary line in the final log
qc1Ingest -table exec_time -civil_date $CIVIL_DATE -local_time $LOCAL_TIME -mjd_obs $TODAY_MJD1 -exec_time $TEXEC -version $TOOL_VERSION -instrument $DFO_INSTRUMENT -hostname $DFO_MACHINE -tool $TOOL_NAME -process_load $PROCESS_LOAD | grep -v "Connecting to" | grep -v "duplicated rows" | sed "s/Successfully.*/- execution time: $TEXEC min (ingested into exec_time)/" >> $FINAL_LOG
}

# =============================================================================
# 0.11 createPHP: create the php script hcForcedRefresh.php and upload to qcweb
# =============================================================================
createPHP(){
# Generate $TMP_DIR/hcForcedRefresh.php, the web form through which users
# trigger 'autoDaily -F' (forced HC refresh). The php script runs a three-step
# dialog (form -> confirm -> launch); on launch it writes the instrument and
# email address into the HC_ENFORCE signal file, which the cronjob picks up.
# The heredocs are unquoted: shell variables are expanded at creation time,
# \$-escaped ones become php variables; the php heredocs end with 'EOT;'
# (note the semicolon) and therefore do NOT terminate the shell heredocs.
# the following two variables must be one-liners in order to be properly replaced by calChecker!
HC_WHY="If you have very recent new data for which you need a quick QC response, launch here a forced refresh of \$instrSelect calibration processing on the QCG server. At the end this HC monitor page, and all other affected ones, will be refreshed. The new versions will be updated both  on the HQ and the PL server. <p> Depending on the amount of data to be processed, and on the current load on the QCG server, this may take a couple of minutes or longer.<p> Note that the current last template gets processed only when it is older than one hour (to avoid processing of yet incomplete datasets). "
HC_HOW="Enter your email address below (it will be used to send you a confirmation once the processing is finished), then submit."

cat > $TMP_DIR/hcForcedRefresh.php <<EOT
<html>
<title>Forced refresh of $DFO_INSTRUMENT HC monitor</title>
<style TYPE="text/css">
<!--
.hclabel {
  background-color: $TITLE_COLOR;
  color: #FFF;
  font-size: x-large;
  text-decoration: none;
  vertical-align: bottom;
  text-align: left;
  text-decoration: none;
}

.nounder {
  text-decoration: none;
}
-->
</style>

<?php
/*
This php script is created by $TOOL_NAME v${TOOL_VERSION}.
It creates the cgi interface to launch a forced refresh of the HC monitor, 
calling 'autoDaily -F'.
*/

//==============================================================
// 1. check if form elements are properly filled:
\$instrSelect  = \$_GET[instrSelect];    // name of instrument for which HC monitor is to be refreshed
\$reportMode   = \$_GET[reportMode];     // for confirmation

// no report encoded: error (cannot happen with form)
if (\$instrSelect=='') {
  die ('<img src=/observing/dfo/quality/ALL/img/php-warning.png height=30 width=30> <font size=2>Error: no instrument name specified.<br><a href="javascript:back();">back</a></font>');
}

//==============================================================
// 2. reportMode empty --> we start dialog
if (\$reportMode=='') {
echo <<<EOT
<table style="border-collapse: collapse;" border="1" bordercolor="#ffffff" cellpadding="0" cellspacing="0">
  <tr align="left" valign="bottom">
    <td bgcolor="${TITLE_COLOR}" height="50" width="50"><b><font color="#ffffff" size="3">HC</font></b></td>
    <td valign="bottom"><font size=+2>Forced refresh of \${instrSelect} HC monitor </font></td>
   </tr>
</table><p>

<table style="font-size:small;" width=500>
<tr><td>
<font color=${TITLE_COLOR}>Why?</font> 
EOT

# append the one-line WHY text, tagged <!--HC_WHY--> so calChecker can
# later replace it in place
echo "$HC_WHY" | sed "s/^.*/& <!--HC_WHY-->/" >> $TMP_DIR/hcForcedRefresh.php

cat >> $TMP_DIR/hcForcedRefresh.php <<EOT
<p>

<font color=${TITLE_COLOR}>How?</font> 
EOT

# append the one-line HOW text, tagged <!--HC_HOW--> (same mechanism)
echo "$HC_HOW" | sed "s/^.*/& <!--HC_HOW-->/" >> $TMP_DIR/hcForcedRefresh.php

cat >> $TMP_DIR/hcForcedRefresh.php <<EOT
</td></tr></table>

<form method="get" action="./hcForcedRefresh.php">
<input type="hidden" name="reportMode" value="launched">
<input type="hidden" name="instrSelect" value="\${instrSelect}">

<table style="font-size:small;" width=500>
  <tr style="font-size:small; color:${TITLE_COLOR};" bgcolor=#cccccc>
    <td><b>Your email address</b> <i><font size=1>(e.g. $PSO_EMAIL | $QC_ADDRESS | $OP_ADDRESS):</font></i></td>
  </tr>
  <tr bgcolor="#ffff99">
    <td colspan="2">
      <input name="commentAuthor" type="text" value="&lt;your_email&gt;@eso.org" size="30" maxlength="30">
    </td>
  </tr></table><p>
  <input type="submit" value="Submit" class="hclabel"> &nbsp; &nbsp;
  <input type="reset"  value="Reset">
  <a href="javascript:self.close();" class="nounder"><input type="button" value="close window"></a>
</form>

EOT;
};

if (\$reportMode=='launched') {
  \$commentAuthor = str_replace("<","&lt;",\$_GET[commentAuthor]);
  if (\$commentAuthor=='&lt;your_email>@eso.org'||\$commentAuthor=='') {
  die ('<img src=/observing/dfo/quality/ALL/img/php-warning.png height=30 width=30> <font size=2>Please enter a valid <b>email address</b>. <br><br><a href="hcForcedRefresh.php?instrSelect=$DFO_INSTRUMENT" class="nounder"><input type="button" value="back to form" class="back"></a></font>');
}

echo <<<EOT
<table style="border-collapse: collapse;" border="1" bordercolor="#ffffff" cellpadding="0" cellspacing="0">
  <tr align="left" valign="bottom">
    <td bgcolor="${TITLE_COLOR}" height="50" width="50"><b><font color="#ffffff" size="3">HC</font></b></td>
    <td valign="bottom"><font size=+2>Forced refresh of \${instrSelect} HC monitor </font></td>
   </tr>
</table><p>

<font size=2>
Please confirm that you want to launch a forced refresh of the HC monitor by <i>\${commentAuthor}</i>, or close to exit:
<form method="get" action="./hcForcedRefresh.php">
<input type="hidden" name="reportMode" value="confirmed">
<input type="hidden" name="instrSelect" value="\${instrSelect}">
<input type="hidden" name="commentAuthor" value="\${commentAuthor}">

<input type="submit" value="Confirm" class="hclabel"> &nbsp; &nbsp;
<a href="javascript:self.close();" class="nounder"><input type="button" value="close window"></a>
</form>
</font>
EOT;
};

if (\$reportMode=='confirmed') {
  \$data1 = "<html> <META HTTP-EQUIV=\"Refresh\" CONTENT=\"10\">  <META HTTP-EQUIV=\"Cache-Control\" CONTENT=\"NO-CACHE\"> <pre>\n";
  \$data2 = "Processing not yet started, waiting for cronjob on Garching dfo machine $HOSTNAME to catch this signal (can take a few minutes) ...</pre></html>\n";
  \$new_file = "autolog.html";
  if (!\$file_new = fopen(\$new_file,"w"))
    echo "error: can't open \$new_file ...\n";
  fwrite(\$file_new, \$data1);
  fwrite(\$file_new, \$data2);
  fclose(\$file_new);

  \$commentAuthor = \$_GET[commentAuthor]; 
  \$data = \$instrSelect." ".\$commentAuthor."\n";
  \$new_file = "HC_ENFORCE";
  if (!\$file_new = fopen(\$new_file,"a"))
    die ('<img src=/observing/dfo/quality/ALL/img/php-warning.png height=30 width=30> <font size=2>Error: start with proper dialog.<br><a href="javascript:back();">back</a></font>');
  \$file_new = fopen(\$new_file,"a");
  fwrite(\$file_new, \$data);
  fclose(\$file_new);

echo <<<EOT
<table style="border-collapse: collapse;" border="1" bordercolor="#ffffff" cellpadding="0" cellspacing="0">
  <tr align="left" valign="bottom">
    <td bgcolor="${TITLE_COLOR}" height="50" width="50"><b><font color="#ffffff" size="3">HC</font></b></td>
    <td valign="bottom"><font size=+2>Forced refresh of \${instrSelect} HC monitor </font></td>
   </tr>
</table><p>

<font size=2>
... launched. <p>
Watch the progress behind this <a href="./autolog.html">link</a>.<p>
<a href="javascript:self.close();" class="nounder"><input type="button" value="close window"></a>
</font>
EOT;
};
?>
</html>
EOT
}

# =============================================================================
# 1. Get data for new dates
# 1.1 Setup logging
# =============================================================================

# execution DATE
DATE=`date +%Y-%m-%d`

# daily archive directory for the permanent logs
if [ ! -d $DFO_MON_DIR/AUTO_DAILY ]
then
        mkdir $DFO_MON_DIR/AUTO_DAILY
fi

# log files: during execution we log into $LOG_FILE, which is appended at the end to $FINAL_LOG
FINAL_LOG="$DFO_MON_DIR/AUTO_DAILY/AD_${DATE}.log"
if [ $FORCED_MODE = YES ]
then
	LOG_FILE=$TMP_DIR/forcedlog
else
	LOG_FILE=$TMP_DIR/autolog
fi

rm -f $LOG_FILE 

# we check for other instance running; if so, we wait (unless this is a FORCED call then we exit) ...
checkOtherAutoDaily

# 1-minute load average, ingested later by calc_exectime
PROCESS_LOAD=`uptime | sed "s/^.*load average://" | awk '{print $1}' | sed "s/,//"`

# create and upload php script for forced refresh
createPHP
scp -p $TMP_DIR/hcForcedRefresh.php $DFO_WEB_SERVER:${WEB_DIR}/php/ 2>/dev/null 1>/dev/null

# forced mode: the log doubles as a live html progress page; the requesting
# user's email address is the last entry in the HC_ENFORCE signal file
if [ $FORCED_MODE = YES ]
then
	AUTHOR=`cat $TMP_DIR/HC_ENFORCE | tail -1 | awk '{print $2}'`
	cat > $LOG_FILE <<EOT
<html>
<META HTTP-EQUIV="Refresh" CONTENT="10">
<META HTTP-EQUIV="Cache-Control" CONTENT="NO-CACHE">
<pre>
EOT
fi

echo "========================================================================" >> $LOG_FILE

# opening log line; calc_exectime later greps for these exact phrases
case $FORCED_MODE in
 "YES" ) writeLog "Forced processing started for $DFO_INSTRUMENT by $AUTHOR as `whoami`@`hostname` ..." 
	 writeLog "All times in UT" ;;
 "NO"  ) writeLog "autoDaily started ..." ;;
esac

# announce the execution plan (wording differs between normal and forced mode)
if [ $EXEC_MODE = FULL ] 
then
	if [ $FORCED_MODE = NO ]
	then
		echo "Execution plan:
 1. call ngasMonitor to search for new files, create data report
 2. new files found: 
   2.1 check completeness, create CALIB ABs
   2.2 create CALIB jobs
   2.3 process ABs and create QC reports
 3. update HC plots 
-------------------------------------------------------------------" >> $LOG_FILE
	elif [ $FORCED_MODE = YES ]
	then
		echo " Execution plan:
 1. search for new files in NGAS
 2. if new files found: 
   2.1 update data report
   2.2 create processing jobs
   2.3 execute jobs
   2.4 extract QC information
   2.5 update HC monitor
 3. if no new files found: exit"  >> $LOG_FILE
	scpLog
	fi
fi
 
# =============================================================================
# 1.2 Check for disk space; exit if insufficient
# =============================================================================

# Abort the run (with mail notification) when the data disk is fuller than the
# configured threshold; skipped when DISK/DISK_SPACE are unconfigured or in
# NO_NGAS mode.
if [ "Q$DISK" != "Q" ] && [ "Q$DISK_SPACE" != "Q" ] && [ $EXEC_MODE = FULL ]
then
# df -P (POSIX portable format) guarantees one output line per filesystem,
# so the percentage in column 5 parses correctly even when a long device
# name would otherwise wrap the df output onto two lines
	DISK_FRAC=`df -P $DISK | grep -v File | sed "s/\%//" | awk '{printf"%3s\n",$5}'`
	if [ "$DISK_FRAC" -ge "$DISK_SPACE" ]
	then
		echo "  $DISK: occupied volume ($DISK_FRAC) exceeds configured threshold ($DISK_SPACE)." >> $LOG_FILE
		TIMESTAMP=`date +%Y-%m-%d" "%H:%M:%S`
		writeLog "autoDaily stopped."
		echo "[$TIMESTAMP] 	autoDaily: occupied volume ($DISK_FRAC) on $DISK exceeds configured threshold ($DISK_SPACE). autoDaily stopped." > $TMP_DIR/auto_mail
		mail -s "autoDaily stopped because of disk space problem" $OP_ADDRESS < $TMP_DIR/auto_mail

		if [ $FORCED_MODE = YES ]
		then
			scpLog
		fi
# same shutdown sequence as exitProcs, but without the closing status line
		dfoMonitor -a -q 2>/dev/null
		cat $LOG_FILE | grep -v "^<" >> $FINAL_LOG
		rm -f $TMP_DIR/message
		exit
	fi
fi

if [ $FORCED_MODE = YES ]
then
	scpLog
fi

# =============================================================================
# 1.3 Refresh dfoMonitor
# =============================================================================

# initial progress line: all seven workflow steps still pending
echo "$PENDING $PENDING $PENDING $PENDING $PENDING $PENDING $PENDING starting" > $TMP_DIR/message
# rebuild the monitor page; in normal mode its output goes into the log
case $FORCED_MODE in
 "YES" ) dfoMonitor -m -q 2>/dev/null ;;
 "NO"  ) dfoMonitor -m -q 2>/dev/null >> $LOG_FILE ;;
esac

# =============================================================================
# 1.4 Create reports (to update AVailable flag)
# =============================================================================
# find TODAY
CUR_HOUR=`date -u +%H`

# DFO_OFFSET default: 9
if [ "Q$DFO_OFFSET" = "Q" ]
then
	DFO_OFFSET=9
fi

# the date switch happens at DFO_OFFSET+12 UT; before that hour we still work
# on the previous night
# (fixed: the closing '}' of the awk program used to sit outside the single
# quotes -- "awk '{print $1+12'}" -- and only worked by accidental word
# concatenation)
SWITCH_HOUR=`echo $DFO_OFFSET | awk '{print $1+12}'`
TODAY=`date -u +%Y-%m-%d`
if [ $CUR_HOUR -lt $SWITCH_HOUR ]
then
	TODAY=`qcdate $TODAY -1`
fi

YESTERDAY=`qcdate $TODAY -1`

# 1. ngasMonitor: check for new dates
if [ $EXEC_MODE = FULL ]
then
	echo "$DONE $ACTION $PENDING $PENDING $PENDING $PENDING $PENDING calling ngasMonitor" > $TMP_DIR/message
	quickUpdate 
	echo "-------------------------------------------------------------------" >> $LOG_FILE
	case $FORCED_MODE in
	 "YES" ) writeLog "1. query archive database for new files ..." 
		 TIMESTAMP=`date -u +%Y-%m-%d" "%H:%M:%S`
		 ngasMonitor | grep -v "query archive" | sed "s/^.*/$TIMESTAMP	&/" >> $LOG_FILE
		 scpLog ;;
	 "NO"  ) echo "1.1 call ngasMonitor ... "    >> $LOG_FILE
		 ngasMonitor | sed "s/^.*/       &/" >> $LOG_FILE
		 echo "" >> $LOG_FILE ;;
	esac

# NO_NGAS:
else
	echo "-------------------------------------------------------------------" >> $LOG_FILE
	echo "[1.1 no ngasMonitor called]" >> $LOG_FILE
fi

# ngasMonitor delivers $DFO_MON_DIR/list_data_dates; we move it to $TMP_DIR.
# We enforce to try at least CURRENT date.
# Unless FLASHBACK=YES, we suppress all dates before $YESTERDAY
# (since this may lead to unwanted reprocessing of already certified CALIB ABs)
if [ ! -s $DFO_MON_DIR/list_data_dates ]
then
	echo $TODAY > $DFO_MON_DIR/list_data_dates
fi

# BWo v2.4: add YESTERDAY so that ABs are created for headers
# that might not have been available in the last hour before the date switch
if [ $EXEC_MODE = FULL ]
then
	if [ $CUR_HOUR = 21 ] || [ $CUR_HOUR = 22 ]
	then
		echo $YESTERDAY >> $DFO_MON_DIR/list_data_dates
		sort -u $DFO_MON_DIR/list_data_dates > $DFO_MON_DIR/list_data_dates2
		mv $DFO_MON_DIR/list_data_dates2 $DFO_MON_DIR/list_data_dates
	fi
fi

rm -f $TMP_DIR/list_data_dates

# FLASHBACK=NO in incremental mode: dates older than YESTERDAY are excluded
# from further processing (they could re-trigger already certified CALIB ABs).
# The operator is notified by mail; the data reports are still refreshed.
if [ $FLASHBACK = NO ] && [ $EXEC_MODE = FULL ]
then
	rm -f $TMP_DIR/ad_flashback $TMP_DIR/ad_flashback1
	# everything except TODAY, YESTERDAY and the 'Datab...' header line
	egrep -v "${TODAY}|${YESTERDAY}|Datab" $DFO_MON_DIR/list_data_dates > $TMP_DIR/ad_flashback
	if [ -s $TMP_DIR/ad_flashback ]
	then
		# NOTE(review): the message text says "FLASHBACK=YES" although this
		# branch only runs when FLASHBACK=NO -- looks inverted; confirm intent.
		echo "autoDaily: found data which are not processed because of FLASHBACK=YES:" > $TMP_DIR/ad_flashback1
		cat $TMP_DIR/ad_flashback >> $TMP_DIR/ad_flashback1
		cat >> $TMP_DIR/ad_flashback1 <<EOT

Data report(s) have been updated but no further processing done (left to you).

Proposed procedure to continue:

- if CALIB ABs have already been created but failed because of missing files:
  - "certify+move [CALIB]"   : execute corresponding ABs (so far marked red) and QC reports, then certify

- if no CALIB AB so far processed at all: call 'autoDaily -D', enter date(s), lean back
EOT
		mail -s "autoDaily: found data which are not processed because of FLASHBACK=YES" $OP_ADDRESS < $TMP_DIR/ad_flashback1
		cat $TMP_DIR/ad_flashback1 >> $LOG_FILE

# update data report for those dates (report only -- no processing)
		for D in `cat $TMP_DIR/ad_flashback`
		do
			createWrapper $D | sed "s/^.*/	&/" >> $LOG_FILE
		done
	fi
			
	# keep only TODAY/YESTERDAY for the processing workflow
	egrep "${TODAY}|${YESTERDAY}" $DFO_MON_DIR/list_data_dates > $TMP_DIR/list_data_dates
# NO_NGAS: (or FLASHBACK=YES): take the full date list unfiltered
else
	mv $DFO_MON_DIR/list_data_dates $TMP_DIR/list_data_dates
fi

# no new dates found: clean up and exit (nothing to do in this run)
if [ ! -s $TMP_DIR/list_data_dates ]
then
	writeLog "3.1 Exit."
	if [ $FORCED_MODE = YES ]
	then
		 scpLog
	fi

	exitProcs
	calc_exectime
	exit
fi
	
# new dates found: continue with workflow (even if no new NGAS data found, it could be that some ABs failed last time and can be processed this time)
if [ $FORCED_MODE = NO ]
then
	echo "
New date(s) found: "  >> $LOG_FILE
	cat $TMP_DIR/list_data_dates >> $LOG_FILE
	echo "" >> $LOG_FILE
else
	writeLog "2. Attempting to process new files, or files that became available since the last execution ..."
	scpLog
fi

# progress display: step 3 (data report update) active
echo "$DONE $DONE $ACTION $PENDING $PENDING $PENDING $PENDING updating data report" > $TMP_DIR/message
quickUpdate 
	
# For every new date: refresh the data report, and make sure an AB monitor
# page exists (an empty one is created if needed -- required for navigation).
for D in `grep -v Datab $TMP_DIR/list_data_dates`
do
	# data reports: the AV flag may need an update even if the headers are
	# the same as before
	case $FORCED_MODE in
	"NO" )
		echo "-------------------------------------------------------------------" >> $LOG_FILE
		echo "1.2 updating data reports [createReport] ..." >> $LOG_FILE
		createWrapper $D | sed "s/^.*/	&/" >> $LOG_FILE
		;;
	* )
		writeLog "2.1 updating data report ..."
		scpLog
		createWrapper $D | sed "s/^.*/  &/" 1>/dev/null
		;;
	esac

	# no AB monitor yet for $D: let getStatusAB create an empty one
	if [ ! -s $DFO_MON_DIR/status_${D}.html ]
	then
		echo "  - no AB monitor found, calling getStatusAB -d $D ..." >> $LOG_FILE
		getStatusAB -d $D >> $LOG_FILE
		echo "  " >> $LOG_FILE
	fi
done

# =============================================================================
# 2.0 Data availability 
# =============================================================================

if [ $FORCED_MODE = NO ]
then	
	echo "" >> $LOG_FILE
	echo "-------------------------------------------------------------------" >> $LOG_FILE
	echo "2.1 Checking for completeness of days ..." >> $LOG_FILE 
fi

rm -f $TMP_DIR/list_proc_dates $TMP_DIR/list_incompl_dates

# =============================================================================
# 2.1 find available days
# Note:
# 	raw_Complete 	here means "all files available for download from NGAS!"
# 	raw_Incomplete  means "not yet all files available for download!"
# =============================================================================

# progress display: step 4 (completeness check) active
echo "$DONE $DONE $DONE $ACTION $PENDING $PENDING $PENDING check completeness" > $TMP_DIR/message
quickUpdate 

# all new dates are candidates for processing
cp $TMP_DIR/list_data_dates $TMP_DIR/list_proc_dates

for D in `cat $TMP_DIR/list_data_dates | grep -v Datab`
do
	# make sure the raw data directory for the date exists
	if [ ! -d $DFO_RAW_DIR/$D ]
	then
		mkdir $DFO_RAW_DIR/$D
	fi

# write flag into DFO_STATUS
	# NOTE(review): COMPL_FLAG is not assigned anywhere in this section;
	# presumably set earlier in the script (per date?) -- confirm.
	if [ Q$COMPL_FLAG = Qraw_Complete ]
	then
		# date is complete: the pending-files request list is obsolete
		rm -f $DFO_MON_DIR/list_request_$D.txt
	fi

# write COMPL_FLAG (TODAY: only when it's over)
	if [ Q$COMPL_FLAG != Q ]
	then
		UPDATE=`date +%Y-%m-%d"T"%H:%M:%S`
		echo "$COMPL_FLAG $D $UPDATE" >> $DFO_MON_DIR/DFO_STATUS
	fi
done

# =============================================================================
# 2.2 Checking $TMP_DIR/list_proc_dates (dates to process): existence?
#     If dates remain, sort them chronologically; otherwise notify and stop.
# =============================================================================

if [ -s $TMP_DIR/list_proc_dates ]
then
	# sort the processing dates in place (sort -o is safe for same in/out file)
	sort -o $TMP_DIR/list_proc_dates $TMP_DIR/list_proc_dates
else
	writeLog "No complete data set found for processing. Exit."
	TIMESTAMP=`date -u +%Y-%m-%d" "%H:%M:%S`
	echo "[$TIMESTAMP] 	autoDaily: no complete data set found for processing. Exit." > $TMP_DIR/auto_mail
	mail -s "autoDaily stopped because no complete data set found for processing" $OP_ADDRESS < $TMP_DIR/auto_mail
	exitProcs
	calc_exectime
	exit
fi

# =============================================================================
# 3. createAB: CALIB ABs
# 3.1 Check for other processes running; checked: 
#     createAB, vultur_exec_cascade, processQC
# =============================================================================

if [ $FORCED_MODE = NO ]
then
	echo "" >> $LOG_FILE
	echo "-------------------------------------------------------------------" >> $LOG_FILE
fi

# BWo v2.4
# CHECK_OTHER1: interactive 'createAB -r' (recreateAB) sessions -- killed below.
# CHECK_OTHER2-4: regular dfos tools this script must wait for.
CHECK_OTHER1=`ps -wfC createAB      	  | grep $USER | grep -v CMD | grep "\-r"`

CHECK_OTHER2=`ps -wfC createAB      	  | grep $USER | grep -v CMD | grep -v "\-r"` 
CHECK_OTHER3=`ps -wfC vultur_exec_cascade | grep $USER | grep -v CMD`
CHECK_OTHER4=`ps -wfC processQC     	  | grep $USER | grep -v CMD`
CHECK_OTHER=`echo ${CHECK_OTHER2}${CHECK_OTHER3}${CHECK_OTHER4}`
TIME=0

# discover recreateAB (createAB ... -r): kill it/them and notify the operator
if [ "Q$CHECK_OTHER1" != Q ]
then
	# quote the ps output so awk sees one process per line and extracts ALL
	# matching PIDs (the unquoted form collapsed everything onto one line and
	# only the first instance was killed); $INSTANCE stays unquoted in kill
	# on purpose, so each PID becomes a separate argument
	INSTANCE=`echo "$CHECK_OTHER1" | awk '{print $2}'`
	kill -9 $INSTANCE 1>/dev/null
	TIMESTAMP=`date -u +%Y-%m-%d" "%H:%M:%S`
	echo "[$TIMESTAMP]	autoDaily: instance killed:" > $TMP_DIR/auto_mail
	echo "$CHECK_OTHER1" >> $TMP_DIR/auto_mail
	mail -s "AUTO_DAILY: killed instance of 'createAB -r'" $OP_ADDRESS <$TMP_DIR/auto_mail
fi 

# Wait (polling every 60 s) until concurrent dfos processes have finished;
# give up after $OTHER_TIMEOUT1 seconds with a mail to the operator.
while [ "Q$CHECK_OTHER" != "Q" ]
do
	writeLog "waiting for other dfos processes to finish ..."
	if [ $FORCED_MODE = YES ]
	then
		scpLog
	fi

	sleep 60
	TIME=`echo $TIME 60 | awk '{print $1+$2}'`
	
	# bug fix: use numeric -ge instead of string '='; with '=' a timeout
	# value that is not an exact multiple of 60 would never match and the
	# loop would spin forever
	if [ $TIME -ge $OTHER_TIMEOUT1 ]
	then
		writeLog "***ERROR: Timeout upon waiting for createAB or other tools."
		TIMESTAMP=`date -u +%Y-%m-%d" "%H:%M:%S`
		echo "[$TIMESTAMP] 	autoDaily: timed out upon waiting for createAB or other tools." > $TMP_DIR/auto_mail
		mail -s "autoDaily timed out while waiting for createAB or other tools" $OP_ADDRESS < $TMP_DIR/auto_mail
		exitProcs timeout
		exit
	fi
	# BWo: check moved to the end of loop
	#CHECK_OTHER1=`ps -wfC processPreImg 	  | grep $USER | grep -v CMD` 
	CHECK_OTHER2=`ps -wfC createAB      	  | grep $USER | grep -v CMD` 
	CHECK_OTHER3=`ps -wfC vultur_exec_cascade | grep $USER | grep -v CMD`
	CHECK_OTHER4=`ps -wfC processQC     	  | grep $USER | grep -v CMD`
	# bug fix: CHECK_OTHER1 (the already-killed 'createAB -r') is NOT part of
	# the re-check: it is never refreshed inside this loop, so its stale value
	# used to keep the loop spinning until the timeout. This now matches the
	# initial CHECK_OTHER assembled before the loop.
	CHECK_OTHER=`echo ${CHECK_OTHER2}${CHECK_OTHER3}${CHECK_OTHER4}`
done

# last (most recent) date to be processed in this run
LAST_DATE=`cat $TMP_DIR/list_proc_dates | sort | tail -1`

# =============================================================================
# 3.2 call $PGI_PREPROC if defined 
# =============================================================================

# site-specific pre-processing plugin, executed in-process via eval
if [ $PGI_PREPROC != NONE ]
then
	eval "$DFO_BIN_DIR/$PGI_PREPROC"
fi

# =============================================================================
# 3.3 call createAB (only for complete dates) 
# =============================================================================

if [ $FORCED_MODE = YES ]
then
	writeLog "2.2 Create processing jobs ..."
	scpLog
else
	echo "2.2 Creating ABs for mode CALIB ..." >> $LOG_FILE
fi

# progress display: step 5 (createAB) active; refresh dfoMonitor in background
echo "$DONE $DONE $DONE $DONE $ACTION $PENDING $PENDING calling createAB" > $TMP_DIR/message
dfoMonitor -m -q 2>/dev/null >> $LOG_FILE &

# Create CALIB ABs for every complete processing date. In forced mode the new
# jobs are reported via writeLog/scpLog; if no new jobs were created at all,
# the script exits here.
for D in `cat $TMP_DIR/list_proc_dates`
do
	if [ $FORCED_MODE = NO ]
	then
		TIMESTAMP=`date -u +%Y-%m-%d" "%H:%M:%S`
		echo "----------------------------------------------------------------------------" | sed "s/^.*/	&/"  >> $LOG_FILE
		echo "[$TIMESTAMP]        createAB -m CALIB -d $D -a -i ..." | sed "s/^.*/	&/" >> $LOG_FILE
# suppress "cannot access" messages from ABbuilder
# (bug fix: the 2>/dev/null now sits on createAB itself; it was previously
# attached to the sed stage, leaving the ABbuilder messages visible)
		createAB  -m CALIB -d $D -a -i 2>/dev/null | sed "s/^.*/	&/" >> $LOG_FILE 
	else
		createAB  -m CALIB -d $D -a -i | sed "s/^.*/	&/" 1>/dev/null 
		if [ -s $DFO_MON_DIR/AB_list_CALIB_$D ]
		then
			# any AB not yet marked DONE counts as a new job
			CHECK_NEW=`cat $DFO_MON_DIR/AB_list_CALIB_$D | grep -v DONE | awk '{print $1}' | head -1`
			if [ Q$CHECK_NEW != Q ]
			then
				writeLog "    New jobs (NAME	RAW_TYPE	SETUP):"
				TIMESTAMP=`date -u +%Y-%m-%d" "%H:%M:%S`
				cat $DFO_MON_DIR/AB_list_CALIB_$D | grep -v DONE | sed "s/ /	/g" | sed "s/^.*/$TIMESTAMP		&/" >> $LOG_FILE
				N_JOBS=`cat $DFO_MON_DIR/AB_list_CALIB_$D | grep -v DONE | wc -l`
			else
				# nothing new to process: stop the forced run here
				writeLog "    ... no new jobs created. Exit."
				echo "-------------------------------------------------------------------" >> $LOG_FILE
				scpLog
				exit
			fi
			scpLog
		fi
	fi
done

# =============================================================================
# 3.4 Management of BADQUAL calibrations (done here for speed: we don't need to wait
#     until processing is done)
#     Create/update list_calBadQual.dat of ABs from $TODAY and scp to $QCWEB_URL,
#     for calChecker calBadQual.php interface. This part relates to calChecker #1.2.2.
#     Strategy:
#     'autoDaily' updates list with *new* ABs from DFOS_OPS (because PSO wants 
#     to hide any calibrations, not just the ones for 'calChecker'). 
#     'calChecker' updates the BAD flag (to HIDDEN) by registering
#     the BAD calibs in the CAL_BADQUAL file.
# =============================================================================

rm -f $TMP_DIR/list_calBadQual*
touch $TMP_DIR/list_calBadQual.dat

# download list from qcweb; add new ABs; merge and upload to qcweb
scp -o BatchMode=yes ${DFO_WEB_SERVER}:${WEB_DIR}/CAL/php/list_calBadQual.dat $TMP_DIR/list_calBadQual_down.dat 1>/dev/null 2>/dev/null

# find all DFOS_OPS ABs for $TODAY
CHECK_TODAY=`ls $DFO_AB_DIR | grep \.ab | head -1`
if [ Q$CHECK_TODAY != Q ]
then
# call/refresh getStatusB, to have all new ABs available
	getStatusAB -d $TODAY 

# strategy: use content of .tab files since they are structured; get all .tab files; check them for being included in $TODAY's AB monitor
	for ATAB in `ls $DFO_AB_DIR | grep .tab`
	do
		AB=`echo $ATAB | sed "s/.tab/.ab/"`
		CHECK_TODAY=`grep $AB $DFO_MON_DIR/status_${TODAY}.html`
		if [ "Q$CHECK_TODAY" != Q ]
		then
			cat $DFO_AB_DIR/$ATAB | awk '{print today,td,$17,td,$20,td,"<a href="url"/"today"/"$6">"$6"</a>"}' today=$TODAY td="</td><td>" url=${QCWEB_URL} >> $TMP_DIR/list_calBadQual.dat
			cat $DFO_AB_DIR/$ATAB | awk '{print $6}' >> $TMP_DIR/list_calBadQual.dat1
		fi
	done

# Now compare list_calBadQual_down.dat and list_calBadQual.dat, add new ABs from list_calBadQual.dat to list_calBadQual_down.dat,
# remove outdated ABs from list_calBadQual_down.dat
	if [ -s $TMP_DIR/list_calBadQual.dat ]
	then
		for AB in `cat $TMP_DIR/list_calBadQual.dat1`
		do
			CHECK_EXIST=`grep $AB $TMP_DIR/list_calBadQual_down.dat 2>/dev/null | grep -v "^#" | tail -1`
# new?
			if [ "Q$CHECK_EXIST" = Q ]
			then
# --> add
				grep "$AB" $TMP_DIR/list_calBadQual.dat >> $TMP_DIR/list_calBadQual_down.dat
			fi
		done
	fi

	for AB in `cat $TMP_DIR/list_calBadQual_down.dat | awk '{print $8}' | sed "s|</a>.*||" | sed "s|>| |" | awk '{print $2}' | sed "/^$/d"`
	do
		CHECK_EXIST=`grep $AB $TMP_DIR/list_calBadQual.dat | head -1`
		if [ "Q$CHECK_EXIST" = Q ]
		then
			sed -i -e "/$AB/d" $TMP_DIR/list_calBadQual_down.dat
		fi
	done

	if [ -s $TMP_DIR/list_calBadQual_down.dat ]
	then
		mv $TMP_DIR/list_calBadQual_down.dat $TMP_DIR/list_calBadQual.dat
	fi

# none found: new date, previous deleted
else
	rm -f $TMP_DIR/list_calBadQual_down.dat
fi

ssh -o BatchMode=yes $DFO_WEB_SERVER "rm -f ${WEB_DIR}/CAL/php/list_calBadQual.dat"
# scp 
if [ -s $TMP_DIR/list_calBadQual.dat ]
then
	chmod a+w $TMP_DIR/list_calBadQual.dat 
	scp -o BatchMode=yes $TMP_DIR/list_calBadQual.dat ${DFO_WEB_SERVER}:${WEB_DIR}/CAL/php/ 1>/dev/null 2>/dev/null
fi

# =============================================================================
# 4. Jobs
# 4.1 createJob CALIB jobs
# =============================================================================

if [ $FORCED_MODE = NO ]
then
	echo "" >> $LOG_FILE
	echo "-------------------------------------------------------------------" >> $LOG_FILE

	echo "2.3 Creating processing jobs for mode CALIB ..." >> $LOG_FILE
	echo "" >> $LOG_FILE
fi

# back up any interactive JOBS file; it is restored again in section 4.2
if [ -s $DFO_JOB_DIR/$JOB_FILE_NAME ]
then
	mv  $DFO_JOB_DIR/$JOB_FILE_NAME $DFO_JOB_DIR/${JOB_FILE_NAME}.backup
fi

# seed the fresh job file with a dfoMonitor refresh as its first entry
cat > $DFO_JOB_DIR/$JOB_FILE_NAME <<EOT
# refresh dfoMonitor
dfoMonitor -m -q 2>/dev/null
EOT

# Create CALIB processing jobs (createJob -F) per date and log the new ABs
# and raw files. QCBQS_TYPE (SER/PAR/IMPLICIT) comes from config.processQC and
# cannot change during the run -- read it once, hoisted out of the loop
# (it used to be re-read on every iteration).
QCBQS_TYPE=`grep "^QCBQS_TYPE"	$DFO_CONFIG_DIR/config.processQC | awk '{print $2}'`

for D in `cat $TMP_DIR/list_proc_dates`
do
	rm -f $DFO_JOB_DIR/execHC1_CALIB_$D 
	if [ $FORCED_MODE = NO ]
	then
		echo "-------------------------------------------------------------------" >> $LOG_FILE
		TIMESTAMP=`date +%Y-%m-%d" "%H:%M:%S`
		echo "[$TIMESTAMP]	createJob -m CALIB -d $D -F" >> $LOG_FILE
		createJob -m CALIB -d $D -F | sed "s/^.*/	&/" >> $LOG_FILE
	else
		createJob -m CALIB -d $D -F | sed "s/^.*/	&/" 1>/dev/null
	fi

	if [ -s $DFO_JOB_DIR/execQC_CALIB_$D ] && [ Q$QCBQS_TYPE != QIMPLICIT ]
	then
		if [ $FORCED_MODE = YES ]
		then
			writeLog "    New raw files:"
		fi

		# start the per-date AB creation log if it does not exist yet
		if [ ! -s $DFO_MON_DIR/AUTO_DAILY/ABL_${D}.log ]
		then
			echo "# List of all CALIB ABs for date $D as created by autoDaily, 
# in order of creation.
# ISO timestamps in local time!" > $DFO_MON_DIR/AUTO_DAILY/ABL_${D}.log
		fi

# position of ABs depend on QCBQS_TYPE; IMPLICIT not supported (unclear if used at all)
		rm -f $TMP_DIR/ad_abs
		case $QCBQS_TYPE in
		 "SER" ) grep "processQC" $DFO_JOB_DIR/execQC_CALIB_$D | awk '{print $3}' > $TMP_DIR/ad_abs ;;
		 "PAR" ) grep "processQC" $DFO_JOB_DIR/execQC_CALIB_$D | awk '{print $1}' > $TMP_DIR/ad_abs ;;
		esac

		if [ -s $TMP_DIR/ad_abs ]
		then
			for AB in `cat $TMP_DIR/ad_abs`
			do
				# record AB creation time in the ABL log
				# (GNU ls --time-style; not portable to BSD ls)
				ls -latr --time-style=long-iso $DFO_AB_DIR/$AB | awk '{print $6,$7,ab}' ab=$AB >> $DFO_MON_DIR/AUTO_DAILY/ABL_${D}.log
				if [ $FORCED_MODE = YES ]
				then
					# log the raw files of each AB, tab-separated
					TIMESTAMP=`date -u +%Y-%m-%d" "%H:%M:%S`
					grep "^RAWFILE" $DFO_AB_DIR/$AB | sed "s|/| |g" | awk '{print $4,$5}' | tr " " "\011" | sed "s/^.*/$TIMESTAMP    		&/" >> $LOG_FILE
				fi
			done
		fi

		if [ $FORCED_MODE = YES ]
		then
			scpLog
		fi
	fi
done

# =============================================================================
# 4.2 move JOBS file to JOBS_AUTO if not empty
# =============================================================================

# rename the generated job file to JOBS_AUTO; restore the interactive JOBS
# file from the backup taken in section 4.1
# NOTE(review): if no backup was created (JOBS file was empty/absent) the
# second mv prints an error to stderr -- presumably harmless; confirm.
mv $DFO_JOB_DIR/$JOB_FILE_NAME $DFO_JOB_DIR/$JOB_FILE_AUTO
mv $DFO_JOB_DIR/${JOB_FILE_NAME}.backup $DFO_JOB_DIR/$JOB_FILE_NAME

# does JOBS_AUTO contain anything beyond the dfoMonitor refresh lines?
rm -f $TMP_DIR/check_exist
grep -v dfoMonitor $DFO_JOB_DIR/$JOB_FILE_AUTO > $TMP_DIR/check_exist
if [ -s $TMP_DIR/check_exist ]
then
	# real jobs present: append a final dfoMonitor refresh
	echo "dfoMonitor -m -q 2>/dev/null" >> $DFO_JOB_DIR/$JOB_FILE_AUTO
else
	# nothing to do: comment out every line, making the file a no-op
	sed -i -e "s/^.*/#&/" $DFO_JOB_DIR/$JOB_FILE_AUTO
fi

if [ $FORCED_MODE = NO ]
then
	echo "------------------------------------------------------------------------"  | sed "s/^.*/	&/"  >> $LOG_FILE
	echo "$JOB_FILE_NAME renamed to ${JOB_FILE_AUTO}, ready for execution." | sed "s/^.*/	&/" >> $LOG_FILE
	echo "-------------------------------------------------------------------" >> $LOG_FILE
fi

# =============================================================================
# 5. execute jobs 
# 5.1 JOBS_AUTO: run the generated job file; in forced mode, afterwards list
#     the ABs whose PROCESS_STATUS is FAILED (typically: files not yet in NGAS)
# =============================================================================

if [ $FORCED_MODE = NO ]
then
	echo "2.4 Execute processing jobs for mode CALIB" >> $LOG_FILE
else
	writeLog "2.3 Execute processing jobs and "
	writeLog "2.4 extract QC information ..."
	scpLog
fi

# progress display: step 6 (job execution) active
# (spacing normalized to single blanks, consistent with the other messages)
echo "$DONE $DONE $DONE $DONE $DONE $ACTION $PENDING calling ${JOB_FILE_AUTO}" > $TMP_DIR/message
quickUpdate 

if [ $FORCED_MODE = NO ]
then
	writeLog "$JOB_FILE_AUTO is launched ..."
fi

chmod a+x $DFO_JOB_DIR/$JOB_FILE_AUTO
if [ $FORCED_MODE = NO ]
then
	$DFO_JOB_DIR/$JOB_FILE_AUTO >> $LOG_FILE
	writeLog "autoDaily: finished processing"
else
	LAST_DATE=`cat $TMP_DIR/list_proc_dates | tail -1`
	$DFO_JOB_DIR/$JOB_FILE_AUTO 1>/dev/null
	writeLog "    ... finished processing."
	writeLog "    Check success under ${QCWEB_URL}/$LAST_DATE/status_${LAST_DATE}.html."
	rm -f $TMP_DIR/list_candidates $TMP_DIR/list_failures
	# use $LAST_DATE explicitly (this used to read $D, left over from the
	# createJob loop above -- same value, but fragile)
	cat $DFO_MON_DIR/AB_list_CALIB_$LAST_DATE | grep -v DONE > $TMP_DIR/list_candidates
	if [ -s $TMP_DIR/list_candidates ]
	then
		# collect ABs with PROCESS_STATUS FAILED
		for AB in `cat $TMP_DIR/list_candidates | awk '{print $1}'`
		do
			CHECK_STATUS=`grep PROCESS_STATUS $DFO_AB_DIR/$AB | grep FAILED | awk '{print $2}'`
			if [ "Q$CHECK_STATUS" != Q ]
			then
				echo $AB >> $TMP_DIR/list_failures
			fi
		done
		if [ -s $TMP_DIR/list_failures ]
		then
			TIMESTAMP=`date -u +%Y-%m-%d" "%H:%M:%S`
			# NOTE(review): the redirect on this writeLog is probably
			# redundant (other calls have none); kept to preserve behavior
			writeLog "    Some jobs failed, probably because the files are not yet available in NGAS:" >> $LOG_FILE
			cat $TMP_DIR/list_failures | awk '{print $1}' | sed "s/^.*/$TIMESTAMP		&/" >> $LOG_FILE
			writeLog "    You may want to try again later."
		fi
	fi
	scpLog
fi

# =============================================================================
# 5.2 execHC file: all HC plots 
# - include all execHC1_CALIB_$D as produced by scoreQC
# =============================================================================

# collect all per-date trendPlotter calls into one working file
rm -f $TMP_DIR/execHC_TREND
for D in `cat $TMP_DIR/list_proc_dates`
do
	if [ -s $DFO_JOB_DIR/execHC1_CALIB_$D ]
	then
		cat $DFO_JOB_DIR/execHC1_CALIB_$D >> $TMP_DIR/execHC_TREND
	fi
done

if [ $FORCED_MODE = YES ]
then
	writeLog "2.5 Update HC monitor (under ${HC_URL}/<HEALTH or FULL>) ..."
	writeLog "    Updating reports (only for successful jobs)"
	writeLog "    (They will become visible immediately on the HQ server, and with some replication delay also on the PL server):"
	if [ ! -s $TMP_DIR/execHC_TREND ]
	then
		writeLog "	none."
	else
		# log the (deduplicated) report names and count them; $4 is
		# presumably the report name in the trendPlotter call -- confirm
		TIMESTAMP=`date -u +%Y-%m-%d" "%H:%M:%S`
		cat $TMP_DIR/execHC_TREND | sort -u | awk '{print $4}' | sed "s/^.*/$TIMESTAMP		&/" >> $LOG_FILE
		N_REPORTS=`cat $TMP_DIR/execHC_TREND | sort -u | awk '{print $4}' | wc -l`
	fi
	scpLog
fi

# Build $DFO_JOB_DIR/execHC_TREND as an executable shell script from the
# collected trendPlotter calls, then run it.
cat > $DFO_JOB_DIR/execHC_TREND <<EOT
#!/bin/sh
EOT

# optimize launching of execHC:
# we could fire up to 6 trendPlotter jobs at once without significantly degrading execution time; we choose
# N=4 here

if [ -s $TMP_DIR/execHC_TREND ]
then
# support even non-standard trendPlotter calls with more than 3 parameters:
# batches of 4: three backgrounded calls, then one foreground call as barrier
	cat $TMP_DIR/execHC_TREND | sort -u | awk 'BEGIN {NR=0} {if (int(NR/4) == 0) {print "sleep 1;",$0,"&"} else {print "sleep 1;" ,$0; NR=0}}' >> $DFO_JOB_DIR/execHC_TREND
	chmod a+x $DFO_JOB_DIR/execHC_TREND

	TIMESTAMP=`date +%Y-%m-%d" "%H:%M:%S`

	# progress display: final step active
	# (bug fix: this message had 8 status fields -- seven $DONE plus $ACTION --
	# while the monitor shows 7 workflow steps, like every other message in
	# this script; one $DONE and a stray double blank removed)
	echo "$DONE $DONE $DONE $DONE $DONE $DONE $ACTION execHC_TREND..." > $TMP_DIR/message
	quickUpdate 

	if [ $FORCED_MODE = NO ]
	then
		echo "" >> $LOG_FILE
		echo "-------------------------------------------------------------------" >> $LOG_FILE
		echo "3. Update HC plots" >> $LOG_FILE
		echo "$TIMESTAMP	Calling execHC_TREND ..." >> $LOG_FILE
		$DFO_JOB_DIR/execHC_TREND 1>/dev/null 2>/dev/null
		echo "" >> $LOG_FILE
		echo "... execHC_TREND executed." >> $LOG_FILE
	else
		$DFO_JOB_DIR/execHC_TREND 1>/dev/null 2>/dev/null
		writeLog "... done."
		writeLog "Processed jobs:		${N_JOBS}"
		writeLog "Updated HC plots:	${N_REPORTS}"
		writeLog "-------------------------------------------------------------------" 
		scpLog
	fi
else
	if [ $FORCED_MODE = NO ]
	then
		echo "" >> $LOG_FILE
		echo "-------------------------------------------------------------------" >> $LOG_FILE
		echo "3. Update HC plots: No execHC_TREND found." >> $LOG_FILE
	fi
fi

# =============================================================================
# 6. Manage outdated logs
# =============================================================================

# final monitor refresh, then release processes and store the execution time
dfoMonitor -m -q 2>/dev/null

exitProcs
calc_exectime

# purge AUTO_DAILY logs from 2, 3 and 4 years ago (rolling window; the
# script runs daily, so older years were already removed in earlier runs)
YY=`date -u +%Y`
for AGE in 2 3 4
do
	OLD_YY=$((YY - AGE))
	rm -f $DFO_MON_DIR/AUTO_DAILY/A*_${OLD_YY}*log
done

rm -f $TMP_DIR/list_proc_dates

# =============================================================================
# 7. End
# =============================================================================
exit 0
