#!/bin/sh
# PURPOSE:      ingest calibration and science products into archive
# AUTHOR:       Burkhard Wolff / ESO
# VERSIONS:     1.0 -- March 2005
#		1.1 -- set cdbfile column in QC1 database (2005-07-26)
#		1.1.1- bug in file selection fixed (2005-09-14)
#		1.1.2- ingest and qc1 update lists go to $DFO_LST_DIR (2005-12-22)
#		1.1.3- prepared to run over night; bug with PRO.CATG list fixed (2006-01-23)
#		1.1.4- improved error handling, email sent (2006-01-26)
#		1.1.5- check consistency of NGAS host definition; dfosLog disabled (2006-05-02)
#		1.1.6- select PRO CATG list made unique (2006-11-08)
#		1.2 -- mode=SCIENCE supported; PRO.CLASS, PRO.TECH supported (2007-11-08; RH)
#		1.3 -- check for COMMENT keys before replacekey, check for raw file/header existence;
#		       create list of not successfully ingested frames (2007-12-13; BW)
#		1.4 -- writes call of cleanupProducts into job file (2008-01-24; RH)
#		1.4.1- fix for extremely large file numbers (2008-05-13; BW)
#		1.4.2- query to verify archive ingestion changed (2008-06-18; BW)
#		1.5 -- ingest ancillary fits files (2008-08-18)
#		1.5.1- tries to ingest ancillary files even if product dirs do not exist (2008-10-13; BW)
#		1.5.2- ingests ancillary files only if not yet ingested;
#		       QC1 update logs only saved in case of error (2008-10-16; BW)
#		1.5.3- bug fixed in line 513 (2008-10-30; BW)
#		1.5.4- warning message for already ingested files improved;
#		       bug fix in case product directories are empty (2009-12-17; BW)
#		1.6 -- enabled for parallel processing; check for existence and consistency of ancillary log files (2010-04-21; BW)
#		1.6.1- check for REF_NGAMS_HOST disabled (2011-05-23)
#		1.6.2- ING_FILE_NAME hard-coded (2011-10-10)
#		2.0 -- enabled for PHOENIX/IDP ingestion [RHa] (2013-07-30)
#		2.0.1- ingestion of SCIENCE fits files turned off in non-PHOENIX mode (2013-12-10)
#		2.1 -- parallel execution terminated; handling of ancillary files terminated; handling of IDPs modified (2014-05-08)
#		2.2 -- enabled in new standard way for DFOS and PHOENIX; ALL_INS_SUM entries added for IDPs (2014-08-18)
#		2.2.1- using new dpIngest instead of cdbIngest; replace calib..data_products by qc_metadata..qc_products, cdbfile --> origfile in queries (2014-09-04)
#		2.3 -- new option -f to call fitsverify, for testing and ticket creation (2014-09-15)
#		3.0 -- modified for phoenix v2, MCALIB production (2015-07-20)
#		3.0.1- minor improvement for PHOENIX_DAILY table (PHOENIX only) (2015-07-27)
#		3.0.2- JOBS_CLEANUP filled both for DFOS and for PHOENIX (2015-09-21)
#		3.0.3- small bug fix for PHOENIX_DAILY table (2016-04-27)
#		3.0.4- check INGESTED for uncommented ERRORs only; optional CLEANUP_PLUGIN (2016-06-22)
#		3.1 -- enabled for PHOENIX DEEP_MODE (2017-04-04)
#		3.1.1- bug fix for MJD (:537) (2017-08-10)
#		3.2 -- check for ENABLE_UPDATES; repeat dpIngest if failing (2018-01-08)
#		3.2.1- add pseudo_date in ingestion to daily_idpstat in MCAL case (2019-05-08; BWo)
#
# PARAMETERS: 	-m mode (CALIB or SCIENCE)	[optional parameter]
#               -d date (DATE)			[required parameter]
# OPTIONS:	-f call fitsverify only and exit [optional]
#
# TOOLS CALLED:	dpIngest; IDP ingestion tool, converter if used within PHOENIX
#
# CONFIG:	$DFO_CONFIG_DIR/config.ingestProducts	[required]
#
# OUTPUT:	$DFO_LST_DIR/list_ingest_${MODE}_${DATE}.txt 
#		$DFO_LST_DIR/list_qc1update_${MODE}_${DATE}.txt (only when an error occurred)
#
# COMMENTS: 	Within config.ingestProducts, it can be decided which PRO CATGs are ingested.
#		Switch QC1_UPDATE in config.ingestProducts determines whether update of QC1 database is done.
#		The handling of PRO.TECH is a safety measure, this is usually done by the pipelines.
#		In IDP mode, statistics are inserted into daily_idpstat and monthly_idpstat. 
# NOTE:		supporting PHOENIX
# =========================================================================
TOOL_VERSION=3.2.1
TOOL_NAME=ingestProducts

# fitsverify: currently turned off (see section 0.99)
# make sure in your $PATH that $DFO_BIN_DIR is found before /opt/cfitsio/bin and so this dummy fitsverify
# to disappear once the fitsverify issues with some pipeline products are gone

# full path of the real fitsverify binary; used only in the -f branch (section 0.9)
FITSVERIFY_CALL="/opt/cfitsio/bin/fitsverify"

# =========================================================================
# 0. preparation
# 0.1 check for DFOS environment variables and config file
# =========================================================================

# any DFO_* variable in the environment indicates that ~/.dfosrc has been sourced
CHECK=`printenv | grep DFO`

if [ "Q$CHECK" = "Q" ]
then
        echo "*** ERROR: DFO variables not defined. Check ~/.dfosrc and restart."
        exit -1
fi

# the tool cannot run without its configuration file
if [ ! -s $DFO_CONFIG_DIR/config.ingestProducts ]
then
	echo "*** ERROR: No configuration file $DFO_CONFIG_DIR/config.ingestProducts found. Check and re-start."
	exit -1
fi

# check if PHOENIX; anything but an explicit YES (including unset) means NO
if [ Q$THIS_IS_PHOENIX != QYES ]
then
	THIS_IS_PHOENIX=NO
fi

# =========================================================================
# 0.2 parameter check
# =========================================================================

# called without any argument: print the help text and exit
if [ $# = 0 ] 
then
	cat $DFO_DOC_DIR/ingestProducts.h 
        exit 0
fi

# option defaults
FITSVERIFY_ONLY=NO
ENABLE_UPDATES=NO
DEBUG=NO
DEEP_MODE=NO

while getopts m:d:DUfhv OPTION
do
	case "$OPTION" in
	 v ) echo $TOOL_VERSION
	     exit 0 ;;
	 h ) cat $DFO_DOC_DIR/ingestProducts.h
	     exit 0 ;;
	 d ) DATE=$OPTARG ;;
	 m ) MODE=$OPTARG ;;
	 f ) FITSVERIFY_ONLY=YES ;;
	 U ) ENABLE_UPDATES=YES ;;
	 D ) DEBUG=YES ;;
	esac
done

# Check for MODE and DATE
if [ Q$MODE = Q ]
then
	echo "***ERROR: you must specify <mode>."
	exit -1
fi

if [ Q$DATE = Q ]
then
	echo "***ERROR: you must specify <date>."
	exit -1
fi

# plain call: exactly 4 arguments expected (-m <mode> -d <date>)
if [ $# != 4 ] && [ $FITSVERIFY_ONLY = NO ] && [ $DEBUG = NO ] && [ $ENABLE_UPDATES = NO ]
then
	echo "***ERROR: you must specify <date> and <mode>."
	exit 0 
fi	

# with -f or -U: one extra flag, so 5 arguments in total
if [ $# != 5 ] && ( [ $FITSVERIFY_ONLY = YES ] || [ $ENABLE_UPDATES = YES ] )
then
	echo "***ERROR: you must specify <date> and <mode>."
	exit 0 
fi	

if [ "Q$DATE" = "Q" ]
then
	echo "***ERROR: DATE not specified."
	exit -1
fi

# only the two canonical modes are accepted (unless -f, handled below)
if [ $MODE != "CALIB" ] && [ $MODE != "SCIENCE" ] && [ $FITSVERIFY_ONLY = NO ]
then
	echo "***ERROR: Mode must be CALIB or SCIENCE. Re-start."
	exit -1
fi

if [ $MODE != "CALIB" ] && [ $FITSVERIFY_ONLY = YES ]
then
	echo "***ERROR: checking fitsverify for products is enabled for CALIB mode only."
	exit -1
fi

# date must be ISO format YYYY-MM-DD: 10 characters plus the newline from echo
CHECK=`echo $DATE | wc -c`
if [ $CHECK != 11 ]
then
	echo "***ERROR: wrong date format: $DATE; should be: 2005-04-04"
	exit -1
fi

# PHOENIX=YES --> check that the configured ingestion type (MCALIB vs. IDP/DEEP)
# matches the mode the tool was called with
if [ $THIS_IS_PHOENIX = YES ]
then
	if [ ! -s $DFO_CONFIG_DIR/config.phoenix ]
	then
		echo "***ERROR: no \$DFO_CONFIG_DIR/config.phoenix found, can't run in PHOENIX mode. Exit."
		exit -1
	fi

# MCAL_CONFIG set --> MCALIB ingestion; requires -m CALIB
	MCAL_CONFIG=`grep "^MCAL_CONFIG" $DFO_CONFIG_DIR/config.phoenix | awk '{print $2}'`
	if [ Q$MCAL_CONFIG != Q ] && [ $MODE != CALIB ]
	then
		echo "***ERROR: config.phoenix is configured for MCALIB ingestion but the tool is called in mode SCIENCE. Exit."
		exit
	elif [ Q$MCAL_CONFIG = Q ] && [ $MODE = CALIB ]
	then
		echo "***ERROR: $TOOL_NAME is called for PHOENIX, MCALIB ingestion, but config.phoenix is not configured for MCALIB ingestion. Check!"
		exit
	fi

# DEEP_CONFIG set --> DEEP IDP ingestion; requires -m SCIENCE
	DEEP_CONFIG=`grep "^DEEP_CONFIG" $DFO_CONFIG_DIR/config.phoenix | awk '{print $2}'`
	if [ Q$DEEP_CONFIG != Q ] && [ $MODE != SCIENCE ]
	then
		echo "***ERROR: config.phoenix is configured for DEEP IDP ingestion but the tool is called in mode CALIB. Exit."
		exit
	fi
	
	if [ Q$DEEP_CONFIG != Q ]
	then
		DEEP_MODE=YES
		echo "***INFO: $TOOL_NAME is called for PHOENIX, DEEP SCIENCE ingestion. If this is incorrect,"
	else
		echo "***INFO: $TOOL_NAME is called for PHOENIX, SCIENCE ingestion. If this is incorrect,"
	fi
	echo "         check your \$DFO_CONFIG_DIR/config.phoenix."
fi

# =========================================================================
# 0.3 some more preparation
# =========================================================================

# DEEP mode uses its own config and sources an extra resource file
if [ $DEEP_MODE = YES ]
then
	CONFIG=$DFO_CONFIG_DIR/$DEEP_CONFIG	
	DEEP_RESOURCE=`grep "^DEEP_RESOURCE"    $CONFIG | awk '{print $2}'`
	source $HOME/$DEEP_RESOURCE
	echo "*** INFO: Tool running in DEEP mode, using $DEEP_CONFIG and $DEEP_RESOURCE."
else
	CONFIG=$DFO_CONFIG_DIR/config.phoenix
fi

# set email switch; anything but an explicit NO means YES
SEND_MAIL=`grep ^SEND_MAIL $DFO_CONFIG_DIR/config.ingestProducts | awk '{print $2}'`
if [ "Q$SEND_MAIL" != "QNO" ]
then
	SEND_MAIL=YES
fi

if [ "$SEND_MAIL" = "YES" ]
then
	rm -f $TMP_DIR/ip_mail
# operator account, used in the mail subject lines
	ACCOUNT=`who am i | awk '{print $1}'`
fi

# names of the ingestion log and the QC1 update log (under $DFO_LST_DIR)
ING_LOG="list_ingest_${MODE}_${DATE}.txt"
UPD_LOG="list_qc1update_${MODE}_${DATE}.txt"

# check for pre-existing log file; work on a copy in $TMP_DIR
if [ -s $DFO_LST_DIR/${ING_LOG} ] && [ $FITSVERIFY_ONLY = NO ]
then
	cp $DFO_LST_DIR/${ING_LOG} $TMP_DIR
fi

# announce the run and, depending on mode, read the release/instrument setup
if [ $MODE = CALIB ] && [ $FITSVERIFY_ONLY = NO ] && [ $THIS_IS_PHOENIX = NO ]
then
	echo "- Ingestion of CALIB products for $DATE started ..."
elif [ $MODE = CALIB ] && [ $THIS_IS_PHOENIX = YES ] && [ Q$MCAL_CONFIG != Q ]
then
# PHOENIX MCALIB: read instrument, release and mode from the MCAL config
	PROC_INSTRUMENT=`grep "^PROC_INSTRUMENT" $DFO_CONFIG_DIR/$MCAL_CONFIG | awk '{print $2}'`
	if [ Q$PROC_INSTRUMENT = Q ]
	then
		echo "***ERROR: PROC_INSTRUMENT not defined in $DFO_CONFIG_DIR/$MCAL_CONFIG. Exit."
		exit -1
	fi
	RELEASE=`grep "^RELEASE"	$DFO_CONFIG_DIR/$MCAL_CONFIG | grep "[[:space:]]${PROC_INSTRUMENT}[[:space:]]" | awk '{print $3}'`
	INSTR_MODE=`grep "^INSTR_MODE"	$DFO_CONFIG_DIR/$MCAL_CONFIG | grep "[[:space:]]${PROC_INSTRUMENT}[[:space:]]" | awk '{print $3}'`
	INSTR_DIR="/home/qc/public_html/${RELEASE}"     # on $DFO_WEB_SERVER

	if [ $DEBUG = YES ]
	then
		echo "- Ingestion of PHOENIX MCALIB products for $DATE started (INTERACTIVE mode) ..."
	else
		echo "- Ingestion of PHOENIX MCALIB products for $DATE started (AUTOMATIC mode) ..."
	fi

elif [ $MODE = SCIENCE ] && [ $FITSVERIFY_ONLY = NO ]
then
	echo "- Ingestion of products for $DATE started ..."
	if [ $THIS_IS_PHOENIX = YES ]
	then
# PHOENIX IDP: read setup from $CONFIG (config.phoenix or the DEEP config)
		PROC_INSTRUMENT=`grep "^PROC_INSTRUMENT" $CONFIG | awk '{print $2}'`
		if [ Q$PROC_INSTRUMENT != Q ]
		then
			RELEASE=`grep "^RELEASE"		$CONFIG | grep "[[:space:]]${PROC_INSTRUMENT}[[:space:]]" | awk '{print $3}'`
			INSTR_MODE=`grep "^INSTR_MODE"		$CONFIG | grep "[[:space:]]${PROC_INSTRUMENT}[[:space:]]" | awk '{print $3}'`
			INGEST_ENABLED=`grep "^INGEST_ENABLED" 	$CONFIG | awk '{print $2}'`

			if [ Q$RELEASE != Q ] && [ Q$INGEST_ENABLED != Q ]
			then
				echo "- MODE=SCIENCE: enabled for IDP ingestion, instrument $PROC_INSTRUMENT, release $RELEASE ..."
# ingestion tool and optional converter; both may contain env vars --> eval
				PATH_TO_IT=`grep "^PATH_TO_IT" $DFO_CONFIG_DIR/config.ingestProducts | awk '{print $2}'`
				PATH_TO_IT=`eval "echo $PATH_TO_IT"`
				CONVERTER=`grep "^CONVERTER" $DFO_CONFIG_DIR/config.ingestProducts | awk '{print $2}'`

				PRODUCT_MARKER=`grep "^PRODUCT_MARKER" $DFO_CONFIG_DIR/config.phoenixMonitor | grep "$PROC_INSTRUMENT" | awk '{print $3}'`

				if [ Q$CONVERTER != Q ]
				then
					CONVERTER=`eval "echo $CONVERTER"`
				else
					echo "  No conversion tool configured."
				fi
			fi
		fi
# SCIENCE other than IDPs: not possible
	else
		echo "- MODE=SCIENCE: not possible, exit."
		exit
	fi
# FITSVERIFY_ONLY mode
else
	echo "- Calling fitsverify for CALIB products for $DATE (no ingestion done!) ...  "
fi

# =========================================================================
# 0.4 procedure checkPRE (for PHOENIX MCALIB ingestion: check for 
#     pre-existing mcalibs)
#     $1,$2 mjd range derived from $DATE
# =========================================================================
checkPRE(){
# Query qc_metadata..qc_products for already archived master calibs
# (dp_id like "M.%") of this instrument within the MJD range $1..$2.
# $1 - lower MJD bound; $2 - upper MJD bound
# Result rows (origfile, pro_catg, ins_mode) go to $TMP_DIR/ip_query_out.
cat > $TMP_DIR/ip_query_exist <<EOT
select 
	origfile,
	pro_catg,
	ins_mode
from
	qc_metadata..qc_products
where
	instrume = "$DFO_INSTRUMENT"
and
	mjd_obs between $1 and $2
and
	dp_id like "M.%" 
GO
EOT

rm -f $TMP_DIR/ip_query_out
# strip the isql column header (lines 1-2), the row-count line, and blank lines
isql -S${QC1_SERVER} -Uqc -P`cat $QC1_PWD` -w999 -i $TMP_DIR/ip_query_exist | sed "1,2 d" | grep -v affected | sed "/^$/d" > $TMP_DIR/ip_query_out
}

# =========================================================================
# 0.5 procedure checkExist (for PHOENIX MCALIB ingestion: 
#     check if new mcalibs already exist)
# =========================================================================
checkExist(){
# Check which of the local mcalibs in $DFO_CAL_DIR/$DATE are already known
# to the archive, by origfile. Result rows (origfile, pro_catg, ins_mode)
# go to $TMP_DIR/ip_query_out2.
cat > $TMP_DIR/ip_query_exist2 <<EOT
select 
	origfile,
	pro_catg,
	ins_mode
from
	qc_metadata..qc_products
where
	origfile in (
EOT
	
# build the in-list: quote each local file name; the second sed removes the
# trailing comma on the last line
ls $DFO_CAL_DIR/$DATE | grep $MCAL_CODE | grep fits | sed "s/^.*/\"&\",/" | sed "$,$ s/,//" >> $TMP_DIR/ip_query_exist2

cat >> $TMP_DIR/ip_query_exist2 <<EOT
)
go
EOT

rm -f $TMP_DIR/ip_query_out2
# strip the isql column header, row-count line, and blank lines
isql -S${QC1_SERVER} -Uqc -P`cat $QC1_PWD` -w999 -i $TMP_DIR/ip_query_exist2 | sed "1,2 d" | grep -v affected | sed "/^$/d" > $TMP_DIR/ip_query_out2
}

# =========================================================================
# 0.6 procedure ingestCalib (NEW with v3.2)
#     ingest a single master calib, try several time in case of error
# =========================================================================
ingestCalib(){
# Ingest one master calibration product with dpIngest, retrying on error.
# $1 - fits file to ingest
# Side effects: appends a one-line summary (or, on error / re-ingestion,
# the full dpIngest log) to $TMP_DIR/${ING_LOG}.
	FILE=$1

# try up to NUM_TRY ingestions, wait WAIT seconds before trying again
        NUM_TRY=3
        WAIT=5

# ingest (for PHOENIX: with option -force since we overwrite previous instances)
        if [ $THIS_IS_PHOENIX = YES ]
        then
                FORCE="-force"
        else
                FORCE=""
        fi

        PRO_CATG=`dfits $FILE | fitsort -d "PRO.CATG" | awk '{print $2}'`
        echo "  ... ingesting $FILE (PRO CATG: ${PRO_CATG})"

        IDX=0
        while [ $IDX -lt $NUM_TRY ]
        do
		rm -f $TMP_DIR/dpIngest_log
# BUGFIX: redirect stdout to the log first, then duplicate stderr into it.
# The former order (2>&1 >file) left stderr on the terminal, so ERROR/WARN
# messages printed on stderr never reached the log and were invisible to
# the checks below.
                dpIngest $FILE $FORCE >$TMP_DIR/dpIngest_log 2>&1
                CHECK_ERROR=`egrep "ERROR|WARN" $TMP_DIR/dpIngest_log | grep -v "already present" | head -1`
                CHECK_INGEST=`egrep "ERROR|WARN" $TMP_DIR/dpIngest_log | grep "already present" | head -1`

# if ERROR occurred, try again
                if [ "Q$CHECK_ERROR" != Q ]
                then
                        IDX=`echo $IDX | awk '{print $1+1}'`
                        if [ $IDX -lt $NUM_TRY ]
                        then
                                echo "  ... an error occured. Trying again"
                                sleep $WAIT
                        else
# if ERROR persists, output the whole log file
                                echo "  ... unsucessful. Continuing with next file"
                                cat $TMP_DIR/dpIngest_log >> $TMP_DIR/${ING_LOG}
                        fi
                else
# if already ingested, also output the whole log file
                        if [ "Q$CHECK_INGEST" != Q ]
                        then
                                cat $TMP_DIR/dpIngest_log >> $TMP_DIR/${ING_LOG}
                                echo "  ... already ingested"
                        else
# otherwise only the last line
                                cat $TMP_DIR/dpIngest_log | grep "successfully processed" >> $TMP_DIR/${ING_LOG}
                                echo "  ... successful"
                        fi
# success or known state: leave the retry loop
                        IDX=$NUM_TRY
                fi
        done
}

# =========================================================================
# 0.9 Calling fitsverify
# =========================================================================
# -f given: only run fitsverify over all product fits files, report, and exit;
# no ingestion is done in this branch
if [ $FITSVERIFY_ONLY = YES ]
then
	if [ ! -d $DFO_CAL_DIR/$DATE ]
	then
		echo "*** INFO: no $DFO_CAL_DIR/$DATE found. Exit."
		exit
	fi
	
	cd $DFO_CAL_DIR/$DATE
	CHECK_FITS=`ls | grep fits | head -1`	
	if [ Q$CHECK_FITS = Q ]
	then
		echo "*** INFO: no fits files found. 
Note: this could be due to previous run of cleanupProducts. 'fitsverify' can only run on fits files. Exit."
		exit
	fi

# verify each fits file; remember whether any of them failed
	SUMMARY=""
	for F in `ls | grep fits`
	do
		rm -f $TMP_DIR/ip_fitsout
		eval "$FITSVERIFY_CALL $F" > $TMP_DIR/ip_fitsout
		if [ $? != 0 ]
		then
			echo "$F: ERROR found"
			SUMMARY=ERROR
		fi
	done

	if [ Q$SUMMARY = Q ]
	then
		echo "... no errors found."
	fi
	
	echo "
For investigation of fitsverify in detail, call it like '$FITSVERIFY_CALL <file>' on the command line. Exit."
	exit
fi

# =========================================================================
# 0.99 Trick to turn off fitsverify
# =========================================================================
# Install a no-op fitsverify stub in $DFO_BIN_DIR unless one already exists;
# via PATH ordering it shadows the real /opt/cfitsio/bin/fitsverify (see top).
if [ ! -s $DFO_BIN_DIR/fitsverify ]
then
	printf '#!/bin/sh\nexit 0\n' > $DFO_BIN_DIR/fitsverify
	chmod u+x $DFO_BIN_DIR/fitsverify
fi

# =========================================================================
# 1.0 PHOENIX=YES, SCIENCE
# 1.1 General preparation
# =========================================================================
# job file from which completed ingestion calls get removed
ING_FILE_NAME=JOBS_INGEST

# check for existence of product directories
PRODUCTS_YN=YES
if [ $MODE =  CALIB ] 
then
	if [ ! -d $DFO_CAL_DIR/$DATE ]
	then
		echo "*** INFO: no $DFO_CAL_DIR/$DATE found. Exit." 
		sed -i -e "/ingestProducts -m CALIB -d $DATE/d" $DFO_JOB_DIR/$ING_FILE_NAME
		rm -f $DFO_BIN_DIR/fitsverify
		exit	
	fi
fi

if [ $MODE =  SCIENCE ] 
then
# safety: plain DFOS must never ingest SCIENCE fits files (PHOENIX IDPs only)
	if [ -d $DFO_SCI_DIR/$DATE ] && [ $THIS_IS_PHOENIX = NO ]
	then
		echo "***ERROR: $DFO_SCI_DIR/$DATE is existing. Check and remove, ingestion of SCIENCE fits files is not permitted. Hit return:"
		read input
		rm -f $DFO_BIN_DIR/fitsverify
		exit -1
	fi

	if [ ! -d $DFO_SCI_DIR/$DATE ]
	then
		echo "*** INFO: no $DFO_SCI_DIR/$DATE found. Exit." 
		sed -i -e "/ingestProducts -m SCIENCE -d $DATE/d" $DFO_JOB_DIR/$ING_FILE_NAME
		rm -f $DFO_BIN_DIR/fitsverify
		exit
	fi
fi

# =========================================================================
# 1.2 THIS_IS_PHOENIX = YES and SCIENCE
# =========================================================================

if [ $THIS_IS_PHOENIX = YES ] && [ $MODE =  SCIENCE ]
then

# =========================================================================
# 1.2.1 Converter
# =========================================================================

	if [ Q$CONVERTER != Q ]
	then
# optional converter: transforms the pipeline products into IDP format
		echo "  - Calling $CONVERTER -d $DATE ..."
		$CONVERTER -d $DATE
# check for errors in the converter's CONVERTED log; abort (and mail) on ERROR
		CHECK_ERROR=`grep "ERROR" $DFO_SCI_DIR/$DATE/CONVERTED | head -1`
		if [ "Q$CHECK_ERROR" != Q ]
		then
			echo "*** ERROR: An ERROR has occurred for the conversion of data for $DATE. No files ingested." > $TMP_DIR/ip_mail
			cat $TMP_DIR/ip_mail
			echo "           Please check. Exit."
			if [ "$SEND_MAIL" = "YES" ]
			then
				mail -s "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: An ERROR has occurred upon conversion on ${DATE}" $OP_ADDRESS < $TMP_DIR/ip_mail
			fi
			rm -f $DFO_BIN_DIR/fitsverify
			exit -1
		fi
	fi

# =========================================================================
# 1.2.2 ingestion tool
# =========================================================================
# $PATH_TO_IT could be disabled for testing
	if [ Q$PATH_TO_IT != Q ]
	then
# translate the boolean switch into the tool's command-line option
		if [ $ENABLE_UPDATES = YES ]
		then
			ENABLE_UPDATES="-U"
		else
			ENABLE_UPDATES=""
		fi

		echo "  - Calling $PATH_TO_IT -d $DATE $ENABLE_UPDATES ..."
		$PATH_TO_IT -d $DATE $ENABLE_UPDATES

# check for errors (uncommented lines of the INGESTED log only)
		CHECK_ERROR=`grep "ERROR" $DFO_SCI_DIR/$DATE/INGESTED | grep -v "^#" | head -1`
		if [ "Q$CHECK_ERROR" != Q ]
		then
			echo "*** ERROR: An ERROR has occurred for the ingestion of data for $DATE. No files ingested." > $TMP_DIR/ip_mail
			grep ERROR $DFO_SCI_DIR/$DATE/INGESTED >> $TMP_DIR/ip_mail
			cat >> $TMP_DIR/ip_mail <<EOT

If the error relates to "already been archived", and you want to overwrite (update) the file(s), call the tool again, with option -U (update), 
or set the config key ENABLE_UPDATES to YES.

Check the ingestion log $DFO_LST_DIR/${ING_LOG} for more details.
EOT

			echo "" >> $TMP_DIR/ip_mail
			
			cat $TMP_DIR/ip_mail
	
			echo "Please check. Exit."
			if [ "$SEND_MAIL" = "YES" ]
			then
				mail -s "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: An ERROR has occurred upon ingestion on ${DATE}" $OP_ADDRESS < $TMP_DIR/ip_mail
			fi
			rm -f $DFO_BIN_DIR/fitsverify
			exit -1
		fi
	else
		echo "  - No ingestion tool configured, no ingestion done, presumably this is a test. Exit."
		exit
	fi

# cleanup: remove this date's call from the ingestion job file
	sed -i -e "/$DATE/d" $DFO_JOB_DIR/$ING_FILE_NAME

# =========================================================================
# 1.2.3 Metrics, this ingestion (note for development: this part is developed for SCIENCE;
#       since later MCALIB has been added but the stats have different properties, a 
#       corresponding part has been added for MCALIB which is under Sect.2.8. This is a bit
#       difficult to maintain if certain parts of the logics would have to changed in both 
#       sections.
# =========================================================================
	echo "  Calculating daily statistics, updating $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE ..."

# product counts and sizes (MB); 'conv' holds the converted (ingested) products
	if [ -d $DFO_SCI_DIR/$DATE/conv ]
	then
		N_PROD=`ls $DFO_SCI_DIR/$DATE/conv/*fits | egrep "$PRODUCT_MARKER" | wc -l`
		N_PROD_ALL=`ls $DFO_SCI_DIR/$DATE/conv/* | wc -l`

		SIZE_PROD=`du -ks     $DFO_SCI_DIR/$DATE/conv/*fits | egrep "$PRODUCT_MARKER" | awk '{sum+=$1} END {print sum/1024.}'`
		SIZE_PROD_ALL=`du -ks $DFO_SCI_DIR/$DATE/conv | awk '{print $1/1024.}'`
	else
		N_PROD=0
		N_PROD_ALL=0
		SIZE_PROD=0
		SIZE_PROD_ALL=0
	fi

	rm -f $TMP_DIR/ip_daily_stats
	if [ -s $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE ]
	then
		grep "^$DATE" $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE > $TMP_DIR/ip_daily_stats

		TODAY=`date +%Y-%m-%d`
# DEEP mode: data date is not meaningful --> use today's MJD instead
		if [ $DEEP_MODE = YES ]
		then
			MJD=`qcdate $TODAY`
		else
			MJD=`qcdate $DATE`
		fi

		if [ -s $TMP_DIR/ip_daily_stats ]
		then
# QC1 database: daily_idpstat; generate one qc1Ingest call per stats line
			echo "  Updating daily_idpstat for $PROC_INSTRUMENT ..."
			cat $TMP_DIR/ip_daily_stats |\
		 	 awk '{print "qc1Ingest -instrume qc -table daily_idpstat -mjd_obs",mjd,"-ingestion_date",today, \
 "-civil_date", $1,			\
 "-instrument", proc_instrument,	\
 "-instr_mode", instr_mode, 		\
 "-N_SCI_RAW",$7,		\
 "-MB_SCI_RAW",$8,		\
 "-N_SCI_PRO",n_prod,		\
 "-N_SCI_PRO_ALL",n_prod_all,	\
 "-MB_SCI_PRO",size_prod,	\
 "-MB_SCI_PRO_ALL",size_prod_all, \
 "-N_AB_ALL",$2,		\
 "-N_AB_REJ",$3,		\
 "-N_AB_EXE",$4,		\
 "-T_AB_EXE",$5,		\
 "-T_QC_EXE",$6			\
 }' mjd=$MJD today=$TODAY proc_instrument=$PROC_INSTRUMENT instr_mode=$INSTR_MODE n_prod=$N_PROD n_prod_all=$N_PROD_ALL size_prod=$SIZE_PROD size_prod_all=$SIZE_PROD_ALL> $TMP_DIR/ip_new_entry

# DEEP: add the pseudo_date, to track the run_ID packages 
			if [ $DEEP_MODE = YES ]
			then
				sed -i -e "s/-civil_date/& $TODAY -pseudo_date/" $TMP_DIR/ip_new_entry
			else
# dummy entry as required by qc1Ingest
				sed -i -e "s/-civil_date/-pseudo_date 2099-99-99 &/" $TMP_DIR/ip_new_entry
			fi

# execute the generated qc1Ingest call and keep a backup copy
			chmod u+x $TMP_DIR/ip_new_entry
			$TMP_DIR/ip_new_entry | sed "s/^.*/  &/"
			cat $TMP_DIR/ip_new_entry >> $DFO_MON_DIR/backup_daily_qc1Ingest

# replace the placeholders in the PHOENIX_DAILY table by the measured values
			sed -i -e "/^$DATE/s|N_prod[[:space:]]*size_prod|$N_PROD	$N_PROD_ALL	$SIZE_PROD	$SIZE_PROD_ALL|" $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE
			sed -i -e "/^$DATE/s|^.*|&	$TODAY|"	$DFO_MON_DIR/PHOENIX_DAILY_$RELEASE
		else
			echo "***WARNING: something went wrong with calculating stats for $DATE: not contained in $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE."
		fi
	else
		echo "***ERROR: $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE not found!
Ingestion statistics:
Number of ingested IDPs:	$N_PROD
Number of all ingested files:	$N_PROD_ALL
Size of ingested IDPs:		$SIZE_PROD
Size of all ingested files:	$SIZE_PROD_ALL"
	fi

# =========================================================================
# 1.2.4 Monthly stats update
# =========================================================================
	echo "  Updating monthly_idpstat for $PROC_INSTRUMENT ..."
# aggregate the current month (of today's ingestion date) from daily_idpstat
	FIRST_DATE1=`echo $TODAY | cut -c1-7`
	MJD=`qcdate ${FIRST_DATE1}-01`

	cat > $TMP_DIR/idp_query <<EOT
select distinct
	"${FIRST_DATE1}-01",
 	instrument, 
	instr_mode,
	sum(N_SCI_RAW),
	sum(MB_SCI_RAW)/1024.,
	sum(N_SCI_PRO),
	sum(N_SCI_PRO_ALL),
	sum(MB_SCI_PRO)/1024.,
	sum(MB_SCI_PRO_ALL)/1024.,
	sum(N_AB_ALL),
	sum(N_AB_REJ),
	sum(N_AB_EXE),
	sum(T_AB_EXE)/3600.,
	sum(T_QC_EXE)/3600.
from
	daily_idpstat
where
	instrument = "$PROC_INSTRUMENT"
and
	instr_mode = "$INSTR_MODE"
and
	convert(varchar(7),ingestion_date,112) = "$FIRST_DATE1"	
and
        visible = "Y"
group by
        instr_mode
go
EOT
	rm -f $TMP_DIR/idp_results
	isql -Uqc -P`cat $QC1_PWD` -S${QC1_SERVER} -w999 -i $TMP_DIR/idp_query |  grep -v affected | sed "1,2 d" | sed "/^$/d" > $TMP_DIR/idp_results

# build and execute one qc1Ingest call per aggregated row
	if [ -s $TMP_DIR/idp_results ]
	then
		cat $TMP_DIR/idp_results |\
	 awk '{print "qc1Ingest -instrume qc -table monthly_idpstat -mjd_obs",mjd, \
 "-civil_date", $1,             \
 "-instrument", $2,		\
 "-instr_mode", $3,     	\
 "-N_SCI_RAW",$4,               \
 "-GB_SCI_RAW",$5,              \
 "-N_SCI_PRO",$6,    		\
 "-N_SCI_PRO_ALL",$7,  		\
 "-GB_SCI_PRO",$8,  		\
 "-GB_SCI_PRO_ALL",$9,  	\
 "-N_AB_ALL",$10,                \
 "-N_AB_REJ",$11,                \
 "-N_AB_EXE",$12,                \
 "-T_AB_EXE",$13,                \
 "-T_QC_EXE",$14                 \
 }' mjd=$MJD > $TMP_DIR/idp_new_monthly_entry

		chmod u+x $TMP_DIR/idp_new_monthly_entry
		$TMP_DIR/idp_new_monthly_entry | sed "s/^.*/  &/"

# for DEBUG
	else
		echo "no data found."
	fi

# =========================================================================
# 1.2.5 Monthly stats for ALL_INS_SUM
# =========================================================================
	echo "  updating monthly_idpstat for ALL_INS_SUM ..."
# civil date key with dashes removed (YYYY-MM-01 --> YYYYMM01)
	FIRST_DATE2=`echo "${FIRST_DATE1}-01" | sed "s/-//"g`
	cat > $TMP_DIR/idp_query_all <<EOT
select distinct
	"$FIRST_DATE2",
	"ALL_INS_SUM",
	"ALL_INS_SUM",
	sum(N_SCI_RAW),
	sum(GB_SCI_RAW),
	sum(N_SCI_PRO),
	sum(N_SCI_PRO_ALL),
	sum(GB_SCI_PRO),
	sum(GB_SCI_PRO_ALL),
	sum(N_AB_ALL),
	sum(N_AB_REJ),
	sum(N_AB_EXE),
	sum(T_AB_EXE),
	sum(T_QC_EXE)
from
	monthly_idpstat
where
	instrument != "ALL_INS_SUM"
and
	instr_mode != "ALL_INS_SUM"
and
	convert(varchar(10),civil_date,112) = "$FIRST_DATE2"	
and
        visible = "Y"
go
EOT

	rm -f $TMP_DIR/idp_results_all
	isql -Uqc -P`cat $QC1_PWD` -S${QC1_SERVER} -w999 -i $TMP_DIR/idp_query_all |  grep -v affected | sed "1,2 d" | sed "/^$/d" > $TMP_DIR/idp_results_all

# ingest updated sum into monthly_idpstat
	if [ -s $TMP_DIR/idp_results_all ]
	then
		cat $TMP_DIR/idp_results_all |\
	 awk '{print "qc1Ingest -instrume qc -table monthly_idpstat -mjd_obs",mjd, \
 "-civil_date", $1,             \
 "-instrument", "ALL_INS_SUM",	\
 "-instr_mode", "ALL_INS_SUM",  	\
 "-N_SCI_RAW",$4,               \
 "-GB_SCI_RAW",$5,              \
 "-N_SCI_PRO",$6,    		\
 "-N_SCI_PRO_ALL",$7,  		\
 "-GB_SCI_PRO",$8,  		\
 "-GB_SCI_PRO_ALL",$9,  	\
 "-N_AB_ALL",$10,                \
 "-N_AB_REJ",$11,                \
 "-N_AB_EXE",$12,                \
 "-T_AB_EXE",$13,                \
 "-T_QC_EXE",$14                 \
 }' mjd=$MJD > $TMP_DIR/idp_new_monthly_entry

		chmod u+x $TMP_DIR/idp_new_monthly_entry
		$TMP_DIR/idp_new_monthly_entry | sed "s/^.*/  &/"
	fi

# =========================================================================
# 1.2.7 write cleanupProducts call into JOB file
# =========================================================================

	if [ ! -s $DFO_JOB_DIR/JOBS_CLEANUP ]
	then
# create the cleanup jobs file with its standard header if not yet existing
		cat > $DFO_JOB_DIR/JOBS_CLEANUP <<EOT
#!/bin/sh
# This is the jobs file for cleanup products (after successful run of ingestProducts)

EOT
		chmod u+x $DFO_JOB_DIR/JOBS_CLEANUP
	fi
# avoid duplicate cleanup entries for the same date
	CHECK_PREV=`grep "cleanupProducts -d $DATE" $DFO_JOB_DIR/JOBS_CLEANUP | head -1`
	if [ "Q$CHECK_PREV" = Q ]
	then
		echo "cleanupProducts -d $DATE" >> $DFO_JOB_DIR/JOBS_CLEANUP
	fi

# exit (remove the fitsverify stub installed in section 0.99 first)
	rm -f $DFO_BIN_DIR/fitsverify
	exit 0
fi

# =========================================================================
# 2. MODE=CALIB, PHOENIX=NO or YES (reprocessing)
#    Only MODE=CALIB possible at this point.
# =========================================================================

# CALIB ingestion requires the log directory for DATE; change into it,
# otherwise report the problem (optionally by mail) and abort.
if [ -d $DFO_LOG_DIR/$DATE ]
then
	cd $DFO_LOG_DIR/$DATE
else
	echo "*** ERROR: $DFO_LOG_DIR/$DATE does not exist."
	echo "           Please check. Exit."
	if [ "$SEND_MAIL" = "YES" ]
	then
		echo "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: An ERROR has occurred on ${DATE}. $DFO_LOG_DIR/$DATE does not exist. No files ingested." > $TMP_DIR/ip_mail
		mail -s "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: An ERROR has occurred on ${DATE}" $OP_ADDRESS < $TMP_DIR/ip_mail
	fi
	rm -f $DFO_BIN_DIR/fitsverify
	exit -1
fi

# =========================================================================
# 2.1 set SWITCH, prepare listings (for PHOENIX=NO only)
# =========================================================================

if [ "$PRODUCTS_YN" = "YES" ] && [ $THIS_IS_PHOENIX = NO ]
then
	 cd $DFO_CAL_DIR/$DATE

# SWITCH: ALL / SELECT / DESELECT, controls which PRO CATGs are ingested
	SWITCH=`grep "^INGEST_SWITCH" $DFO_CONFIG_DIR/config.ingestProducts | awk '{print $2}'`
	if [ "$SWITCH" != "ALL" ] && [ "$SWITCH" != "SELECT" ] && [ "$SWITCH" != "DESELECT" ]
	then
		echo "*** ERROR: INGEST_SWITCH in config.ingestProducts has wrong value. Exit."
		rm -f $DFO_BIN_DIR/fitsverify
		exit -1
	fi

	echo "  Ingestion mode: $SWITCH"

# file list: one line per fits file with its PRO.CATG and PIPEFILE
	rm -f $TMP_DIR/ip_file_list
	TMP_LIST=`ls | grep fits | grep "^${MCAL_CODE}"`
	for F in $TMP_LIST
	do
		dfits $F | fitsort -d pro.catg pipefile | grep -v "error: no input data" >> $TMP_DIR/ip_file_list
	done

	if [ ! -s $TMP_DIR/ip_file_list ]
	then
		echo "*** ERROR: no fits file found for ingestion. Files already replaced with their headers?"
		rm -f $DFO_BIN_DIR/fitsverify
		exit
	fi

# =========================================================================
# 2.2 select files to be ingested
# =========================================================================

	FILE_LIST=""
	if [ "$SWITCH" = "ALL" ]
	then
		FILE_LIST=`awk '{print $1}' $TMP_DIR/ip_file_list`
	else
# PRO_CATG entries in the config drive SELECT (keep) / DESELECT (drop)
		CATG_LIST=`grep "^PRO_CATG" $DFO_CONFIG_DIR/config.ingestProducts | awk '{print $2}' | sort -u`
		if [ "Q$CATG_LIST" = "Q" ]
		then
			echo "*** ERROR: list of PRO CATGs not found in config.ingestProducts. Exit."
			rm -f $DFO_BIN_DIR/fitsverify
			exit -1
		fi

		if [ "$SWITCH" = "SELECT" ]
		then
# keep only files whose PRO CATG is configured
			rm -f $TMP_DIR/ip_edit_list
			for C in $CATG_LIST
			do
				grep "[[:space:]]$C[[:space:]]" $TMP_DIR/ip_file_list >> $TMP_DIR/ip_edit_list
			done
		else
# DESELECT: start from the full list and remove configured PRO CATGs
			rm -f $TMP_DIR/ip_edit_list $TMP_DIR/ip_edit_list2
			cp $TMP_DIR/ip_file_list $TMP_DIR/ip_edit_list
			for C in $CATG_LIST
			do
				grep -v "[[:space:]]$C[[:space:]]" $TMP_DIR/ip_edit_list > $TMP_DIR/ip_edit_list2
				mv $TMP_DIR/ip_edit_list2 $TMP_DIR/ip_edit_list
			done
		fi

		if [ -s $TMP_DIR/ip_edit_list ]
		then
			FILE_LIST=`awk '{print $1}' $TMP_DIR/ip_edit_list`
		fi
	fi
fi

# =========================================================================
# 2.3 PHOENIX=YES: prepare the deletion file
# =========================================================================

if [ $MODE = CALIB ] && [ $THIS_IS_PHOENIX = YES ] && [ Q$MCAL_CONFIG != Q ]
then
	cd $DFO_CAL_DIR/$DATE
	echo "- Compare pre-existing and new mcalibs (only analyzing; no deletes or ingestion done at that point) ..."
	echo "  You can use that analysis for checking and fine-tuning your configuration."
	if [ $DEBUG = YES ]
	then
		echo "Hit return to continue:"
		read input
	else
		echo ""
	fi
	rm -f $TMP_DIR/ip_insmode $TMP_DIR/ip_procatg $TMP_DIR/ip_exclprocatg
# PHX_DELETE config entries: PRO_CATG and INS_MODE of mcalibs eligible for deletion
	grep "^PHX_DELETE"	$DFO_CONFIG_DIR/config.ingestProducts | awk '{print $2,$3}' > $TMP_DIR/ip_insmode	

# one-day MJD range for DATE, shifted by the instrument offset $DFO_OFFSET
	MJD1=`qcdate $DATE  | awk '{printf"%10.5f\n",$1+off/24+0.50}' off=$DFO_OFFSET `
	MJD2=`echo $MJD1 | awk '{print $1+0.99999}'`

# archived mcalibs in that range --> ip_query_out; local ones known --> ip_query_out2
	checkPRE $MJD1 $MJD2
	checkExist

# compare existing and new files; classification written to ip_delete:
#   "X <file> REPLACED"  - archived file overwritten by a new local one
#   "X <file> NEW"       - local file not yet archived
#   "o <file> DELETED"   - archived file configured for deletion (PHX_DELETE)
#   "o <file> REMAINING" - archived file kept
	rm -f $TMP_DIR/ip_delete
	if [ -s $TMP_DIR/ip_query_out ]
	then
		for MC in `cat $TMP_DIR/ip_query_out | awk '{print $1}'`
		do
			CHECK_EXIST=`grep $MC $TMP_DIR/ip_query_out2`
			if [ "Q$CHECK_EXIST" != Q ]
			then
				echo "X $MC	REPLACED" >> $TMP_DIR/ip_delete
			else
				PRO_CATG=`grep "$MC" $TMP_DIR/ip_query_out | awk '{print $2}'`
				INS_MODE=`grep "$MC" $TMP_DIR/ip_query_out | awk '{print $3}'`
# check if file is configured for deletion
				if [ -s $TMP_DIR/ip_insmode ]
				then
					CHECK_PRO=`grep "^${PRO_CATG}[[:space:]]" $TMP_DIR/ip_insmode`
				fi

				if [ "Q$CHECK_PRO" = Q ]
				then
					echo "o $MC	REMAINING" >> $TMP_DIR/ip_delete
				else
# check if INS_MODE is configured
					if [ -s $TMP_DIR/ip_insmode ]
					then
						CONFIG_MODE=`grep "^${PRO_CATG}[[:space:]]" $TMP_DIR/ip_insmode | awk '{print $2}'`
					fi

					if [ Q$CONFIG_MODE = QANY ] 
					then
						echo "o $MC	DELETED" >> $TMP_DIR/ip_delete
					elif [ Q$CONFIG_MODE = Q ]
					then
						echo "***ERROR: PHX_DELETE entry for ${PRO_CATG} has an error, entry for INS.MODE missing. We assume ANY."
						echo "o $MC	REMAINING" >> $TMP_DIR/ip_delete
					else
# CONFIG_MODE is a comma-separated list; match at the start or after a comma
						CHECK_MATCH=`echo $CONFIG_MODE | egrep "^${INS_MODE}|,${INS_MODE}"`
						if [ "Q$CHECK_MATCH" != Q ]
						then
							echo "o $MC	DELETED" >> $TMP_DIR/ip_delete
						else
							echo "o $MC	REMAINING" >> $TMP_DIR/ip_delete
						fi
					fi
				fi
			fi
		done

# finally, we check if a mcalib is new
		for MC in `ls $DFO_CAL_DIR/$DATE | grep $MCAL_CODE | grep fits`
		do
			CHECK_NEW=`grep "$MC" $TMP_DIR/ip_query_out`
			if [ "Q$CHECK_NEW" = Q ]
			then
				echo "X $MC	NEW" >> $TMP_DIR/ip_delete
			fi
		done
# if no old mcalibs exist, any existing one is new
	else
		for MC in `ls $DFO_CAL_DIR/$DATE | grep $MCAL_CODE | grep fits`
		do
			CHECK_NEW=`grep "$MC" $TMP_DIR/ip_query_out`
			if [ "Q$CHECK_NEW" = Q ]
			then
				echo "X $MC	NEW" >> $TMP_DIR/ip_delete
			fi
		done
	fi

# Now evaluate the file $TMP_DIR/ip_delete
# and present the analysis in up to four numbered sections: ip_interaction is
# the per-section screen text, ip_log accumulates everything for the log file
	if [ -s $TMP_DIR/ip_delete ]
	then
		echo "1. The following files in the calibDB will be hidden by configuration (sect. 3.)
   but *not* be replaced (by name):" > $TMP_DIR/ip_interaction
# any "o ... DELETED" entry means at least one pre-existing file will be hidden
		CHECK_EXIST=`cat $TMP_DIR/ip_delete | grep "^o" | grep DELETED | head -1`
		if [ "Q$CHECK_EXIST" = Q ]
		then
			echo "None." >> $TMP_DIR/ip_interaction
			if [ $DEBUG = YES ]
			then
				cat $TMP_DIR/ip_interaction
				echo "Hit return to continue:"
				read CONTI_YN
			fi
			cat $TMP_DIR/ip_interaction > $TMP_DIR/ip_log
		else
			if [ $DEBUG = YES ]
			then
				cat $TMP_DIR/ip_interaction
				cat $TMP_DIR/ip_delete | grep "^o" | grep DELETED
				echo "Enter E to exit and investigate, or C to continue (C):"
				read CONTI_YN
			fi
			cat $TMP_DIR/ip_interaction > $TMP_DIR/ip_log
			cat $TMP_DIR/ip_delete | grep "^o" | grep -v PROTECTED >> $TMP_DIR/ip_log
		fi

# interactive bail-out (CONTI_YN is only read in DEBUG mode)
		if [ Q$CONTI_YN = QE ]
		then
			exit
		fi

		echo "" > $TMP_DIR/ip_interaction	
		echo "2. The following files will be replaced by new mcalib files 
with the same name ("REPLACED") or will be ingested without replacing ("NEW"):" >> $TMP_DIR/ip_interaction
		cat $TMP_DIR/ip_delete | grep "^X" >> $TMP_DIR/ip_interaction
		cat $TMP_DIR/ip_interaction >> $TMP_DIR/ip_log
		if [ $DEBUG = YES ]
		then	
			cat $TMP_DIR/ip_interaction
			echo "Hit return to continue:"
			read CONTI_YN
		fi
	fi

# section 3: files that are neither hidden nor replaced
	CHECK_OTHERS=`grep REMAINING $TMP_DIR/ip_delete 2>/dev/null | head -1`
	if [ "Q$CHECK_OTHERS" != Q ]
	then
		echo "" > $TMP_DIR/ip_interaction
		echo "3. The following files will remain unchanged (not hidden, not replaced)" >> $TMP_DIR/ip_interaction
                echo "  (because their pro.catg/ins.mode is not configured for deletion; they could be static calibrations, or from a mode not affected by this project):" >> $TMP_DIR/ip_interaction
		cat $TMP_DIR/ip_delete | grep REMAINING >> $TMP_DIR/ip_interaction
	else
		echo "3. No files remaining unchanged (not hidden, not replaced)." > $TMP_DIR/ip_interaction
	fi

	cat $TMP_DIR/ip_interaction >> $TMP_DIR/ip_log 
	if [ $DEBUG = YES ]
	then
		cat $TMP_DIR/ip_interaction
		echo "Hit return to continue:"
		read input
	fi

# deletion file
# turn every DELETED entry into an executable "dpDelete <file> -force" line
	cat $TMP_DIR/ip_delete 2>/dev/null | grep "DELETED" | awk '{print $2}' | sed "s/^.*/dpDelete & -force/" > $TMP_DIR/ip_delete_doit
	if [ -s $TMP_DIR/ip_delete_doit ]
	then
		echo "" > $TMP_DIR/ip_interaction
		echo "4. This is the deletion file (files that are hidden but not replaced):" >> $TMP_DIR/ip_interaction
		cat $TMP_DIR/ip_delete_doit >> $TMP_DIR/ip_interaction
		if [ $DEBUG = YES ]
		then
			cat $TMP_DIR/ip_interaction
		fi
		cat $TMP_DIR/ip_interaction >> $TMP_DIR/ip_log
	fi

# =========================================================================
# 2.4 PHOENIX=YES: Call dpDelete
# =========================================================================

	if [ -s $TMP_DIR/ip_delete_doit ]
	then
# in DEBUG mode, ask once more before anything destructive happens
		if [ $DEBUG = YES ]
		then
			echo ""
			echo "... analysis finished."
			echo " Should we continue with file deletion and ingestion (up to now we have not change anything, you can exit and re-start safely) (Y/N) (N)?"
			read CALL_YN
			if [ Q$CALL_YN != QY ]
			then
				echo "Think about it. Exit."
				exit
			fi
		fi
		echo "" >> $TMP_DIR/ip_log
		echo "- Deleting ..." | tee -a $TMP_DIR/ip_log
# hide each configured pre-existing mcalib in the archive; column 2 of
# ip_delete_doit is the file name ("dpDelete <file> -force")
		for F in `cat $TMP_DIR/ip_delete_doit | awk '{print $2}'`
		do
			echo "  ... deleting $F " 
			if [ $DEBUG = YES ]
			then
				dpDelete $F -force | tee -a $TMP_DIR/ip_log
			else
				dpDelete $F -force >> $TMP_DIR/ip_log
			fi
		done

		echo "... pre-existing mcalibs deleted. We are ready for ingestion." 
		if [ $DEBUG = YES ]
		then
			echo "Hit return to continue:"
			read input
		fi
	fi
	cat $TMP_DIR/ip_log > $TMP_DIR/${ING_LOG}

# to have the same $TMP_DIR/ip_file_list as for non-PHOENIX 
# (rebuild "name PRO.CATG PIPEFILE" lines for all REPLACED/NEW products)
	rm -f $TMP_DIR/ip_file_list*
	cat $TMP_DIR/ip_delete 2>/dev/null | egrep "REPLACED|NEW" | awk '{print $2}' > $TMP_DIR/ip_file_list1
	if [ -s $TMP_DIR/ip_file_list1 ]
	then
		for F in `cat $TMP_DIR/ip_file_list1`
		do
			dfits $F | fitsort -d pro.catg pipefile >> $TMP_DIR/ip_file_list
		done
	fi

	FILE_LIST=`cat $TMP_DIR/ip_file_list 2>/dev/null | awk '{print $1}'`
	echo "- Ingesting ..." | tee -a $TMP_DIR/${ING_LOG}
fi

# =========================================================================
# 2.5 ingest CALIB (no matter if PHOENIX or not)
# =========================================================================

if [ $MODE = CALIB ]
then
	if [ "Q$FILE_LIST" = "Q" ]
	then
		echo "*** INFO: no fits files found for $DATE"
	else
		for F in $FILE_LIST
		do
# archive ingestion requires PRO.SCIENCE and PRO.TECH; read both from the header
			CHECK_PROSCIENCE=`dfits $F | fitsort -d pro.science | awk '{print $2}'`
			CHECK_PROTECH=`dfits    $F | fitsort -d pro.tech | awk '{print $2}'`

# PRO.SCIENCE missing: replacekey (below) needs at least one COMMENT card as
# insertion point; without one the key cannot be added and the file is skipped
			if [ Q$CHECK_PROSCIENCE = Q ]
			then
				CNT_COMM=`dfits $F | grep -c COMMENT`
				if [ $CNT_COMM -eq 0 ]
				then
					echo "*ERROR: $F: cannot insert pro.science; product not ingested" | tee -a $TMP_DIR/${ING_LOG}
					continue
				fi
			fi

# find parent raw file, copy content of DPR.TECH to PRO.TECH
			if [ Q$CHECK_PROTECH = Q ]
			then
				CNT_COMM=`dfits $F | grep -c COMMENT`
				if [ $CNT_COMM -eq 0 ]
				then
					echo "*ERROR: $F: cannot insert pro.tech; product not ingested" | tee -a $TMP_DIR/${ING_LOG}
					continue
				fi
				FIRST_RAW=`dfits $F | fitsort -d "PRO.REC1.RAW1.NAME" | awk '{print $2}'`
				if [ "Q$FIRST_RAW" = "Q" ]
				then
					echo "*WARNING: $F: PRO.REC1.RAW1.NAME not existing, cannot define key pro.tech; file not archived" | tee -a $TMP_DIR/${ING_LOG}
					continue
				else
					if [ ! -s $DFO_RAW_DIR/$DATE/$FIRST_RAW ]
					then
# raw file not on disk: fall back to the stored header copy (.fits -> .hdr);
# the substitution is anchored so only the trailing extension is rewritten
						FIRST_RAW=`echo $FIRST_RAW | sed "s/fits$/hdr/"`
						if [ ! -s $DFO_HDR_DIR/$DATE/$FIRST_RAW ]
						then	
							echo "*WARNING: $F: parent raw file not found, cannot define key pro.tech; file not archived" | tee -a $TMP_DIR/${ING_LOG}
							continue
						else
							PRO_TECH=`dfits $DFO_HDR_DIR/$DATE/$FIRST_RAW | fitsort -d DPR.TECH | awk '{print $2}'`
							replacekey -p COMMENT -k "HIERARCH ESO PRO TECH" -v "$PRO_TECH" -c "Observation technique" $F > /dev/null 2>> $TMP_DIR/${ING_LOG}
						fi
					else
						PRO_TECH=`dfits $DFO_RAW_DIR/$DATE/$FIRST_RAW | fitsort -d DPR.TECH | awk '{print $2}'`
						replacekey -p COMMENT -k "HIERARCH ESO PRO TECH" -v "$PRO_TECH" -c "Observation technique" $F > /dev/null 2>> $TMP_DIR/${ING_LOG}
					fi	
				fi
			fi

# up to 3 attempts (due to NGAS issue)
			ingestCalib $F
		done
	fi
fi

# =========================================================================
# 2.6 check archive content
# =========================================================================

echo "  ... done.
  Now we check archive entries ..."

# prepare and execute queries in slice of 500 files to avoid SQL error on large lists
rm -f $TMP_DIR/ip_finalquery $TMP_DIR/ip_archived_slice $TMP_DIR/ip_archived

FILE_NUM=`echo $FILE_LIST | wc -w`
FILE_CNT=0
SLICE_CNT=0

for F in $FILE_LIST
do
# plain POSIX shell arithmetic instead of spawning one awk per file and counter
	FILE_CNT=$((FILE_CNT+1))
	SLICE_CNT=$((SLICE_CNT+1))
# first file of a slice: start a fresh query
	if [ $SLICE_CNT -eq 1 ]
	then
		cat > $TMP_DIR/ip_finalquery <<EOF
select origfile 
from qc_metadata..qc_products,ngas..ngas_files
where origfile in (
EOF

	fi
# inside a slice each file name gets a trailing comma; the slice is closed
# (last name without comma, query executed) at 500 files or at the end of the list
	if [ $SLICE_CNT -lt 500 ] && [ $FILE_CNT -lt $FILE_NUM ]
	then
		echo $F | sed "s/^.*fits/\"&\",/" >> $TMP_DIR/ip_finalquery
	else
		echo $F | sed "s/^.*fits/\"&\"/" >> $TMP_DIR/ip_finalquery
		cat >> $TMP_DIR/ip_finalquery <<EOF
)
and
dp_id = file_id
go
EOF
		isql -Uqc -P`cat $QC1_PWD` -S${QC1_SERVER} -w999 -i $TMP_DIR/ip_finalquery -o $TMP_DIR/ip_archived_slice
		cat $TMP_DIR/ip_archived_slice >> $TMP_DIR/ip_archived
		SLICE_CNT=0
	fi
done

# check results
# flag every product that did not come back from the archive query
rm -f $TMP_DIR/ip_notarchived
for F in $FILE_LIST
do
# fixed-string match (-F): file names contain dots, which are regex
# metacharacters and previously could produce false "archived" hits
	CHECK=`grep -Fc -- "$F" $TMP_DIR/ip_archived`
	if [ $CHECK -eq 0 ]
	then
		echo "*NOT ARCHIVED: $F" >> $TMP_DIR/ip_notarchived
	fi
done

# evaluate ip_notarchived
if [ -s $TMP_DIR/ip_notarchived ]
then
	echo "*ERROR: some files not found in archive"
	cat $TMP_DIR/ip_notarchived >> $TMP_DIR/${ING_LOG}
else
	echo "  ... done. All files found in archive."
fi

# =========================================================================
# 2.7 copy log, check for errors, send email
# =========================================================================

# check for errors in log file
TODAY=`date +%Y-%m-%d`
if [ -s $TMP_DIR/${ING_LOG} ]
then
	mv $TMP_DIR/${ING_LOG} $DFO_LST_DIR/

# today's log lines minus all known-benign messages; anything left over is
# treated as a possible error (the egrep also catches the historical
# misspelling "succesfully" in tool output)
	rm -f $TMP_DIR/ip_check
	grep "^${TODAY}" $DFO_LST_DIR/${ING_LOG} | egrep -v "uccessfully|uccesfully" | grep -v "already present" | grep -v "ingestion finished" | grep -v "found in database" > $TMP_DIR/ip_check
	if [ -s $TMP_DIR/ip_check ]
	then
		echo "... ingestion finished. An ERROR may have occurred. Check $DFO_LST_DIR/${ING_LOG} ." | tee -a $DFO_LST_DIR/${ING_LOG}
		if [ "$SEND_MAIL" = "YES" ]
		then
			echo "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: An ERROR may have occurred on ${DATE}. Check $DFO_LST_DIR/${ING_LOG} ." > $TMP_DIR/ip_mail
			mail -s "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: An ERROR may have occurred on ${DATE}" $OP_ADDRESS < $TMP_DIR/ip_mail
		fi
	else

# check for errors about already ingested files
		rm -f $TMP_DIR/ip_check2
		grep "already present" $DFO_LST_DIR/${ING_LOG} > $TMP_DIR/ip_check2
		if [ -s $TMP_DIR/ip_check2 ]
		then
			echo "... ingestion finished. ERROR: (some) files already ingested; new versions of the same file are NOT ingested. Find log in $DFO_LST_DIR/${ING_LOG} ." | tee -a $DFO_LST_DIR/${ING_LOG}
			if [ "$SEND_MAIL" = "YES" ]
			then
				echo "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: (some) files already ingested on ${DATE}; new versions of the same file are NOT ingested. Check $DFO_LST_DIR/${ING_LOG} ." > $TMP_DIR/ip_mail
				mail -s "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: WARNING occurred on ${DATE}" $OP_ADDRESS < $TMP_DIR/ip_mail
			fi
		else
			echo "  ... ingestion finished. No ERROR has occurred. Find log in $DFO_LST_DIR/${ING_LOG} ." | tee -a $DFO_LST_DIR/${ING_LOG}
		fi
	fi

	echo "  You may want to check the content for $DATE by calling 'productExplorer -d $DATE'."
	echo ""

# to align with	IDP scheme
	if [ $THIS_IS_PHOENIX = YES ]
	then
		cp $DFO_LST_DIR/${ING_LOG} $DFO_CAL_DIR/$DATE/INGESTED

# scp to qcweb for exported phoenixMonitor link
# (for SCIENCE this is done in call_IT)
		scp -o BatchMode=yes $DFO_CAL_DIR/$DATE/INGESTED ${DFO_WEB_SERVER}:${INSTR_DIR}/logs/$DATE 1>/dev/null
	fi
else
# empty or missing ingestion log: nothing was ingested in this run
	echo "... no new files ingested. Either all files were already ingested, or a problem occurred."
	if [ "$SEND_MAIL" = "YES" ]
	then
		echo "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: $DATE $MODE - no new files ingested. Either all files were already ingested, or a problem occurred"  > $TMP_DIR/ip_mail
		mail -s "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: ${DATE} $MODE - no new files ingested" $OP_ADDRESS < $TMP_DIR/ip_mail
	fi
fi

# =========================================================================
# 2.8 PHOENIX=YES, MCALIB
#     Metrics, this ingestion (see sect. 1.2.3 note for development)
# =========================================================================
if [ $THIS_IS_PHOENIX = YES ] && [ $MODE = CALIB ]
then
	echo "- Calculating statistics, updating $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE ..."
	rm -f $TMP_DIR/ip_daily_stats

# =========================================================================
# 2.8.1 Daily statistics
# =========================================================================
# for MCALIBs, we count only fits files, and all are the same; N_prod and size_prod is accumulated in PHOENIX_DAILY
	MONTH=`echo $DATE | cut -c 1-7`

# for MCALIBs, most if not all entries in PHOENIX_DAILY are actually per month. Very likely there is no entry for the particular DATE.
# We first sum up the existing stats in PHOENIX_DAILY for N_prod and size_prod for every date we call the tool (not 100% safe but hopefully good enough).
# Then we sum up all other entries for the dates of that month and ingest those numbers into daily_idpstat.
	if [ -s $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE ]
	then
		grep "^$MONTH" $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE > $TMP_DIR/ip_daily_stats
		if [ -s $TMP_DIR/ip_daily_stats ]
		then
# NOTE(review): if the month has entries for several distinct dates, LAST_DATE
# becomes a multi-line list and the greps/seds below act on all of them;
# presumably PHOENIX_DAILY carries one entry per month -- TODO confirm
			LAST_DATE=`cat $TMP_DIR/ip_daily_stats | awk '{print $1}' | sort -u | awk '{print $1}'`

# monthly sums over columns 2-8: AB counts, execution times, raw counts/sizes
			N_AB_ALL=`cat $TMP_DIR/ip_daily_stats | awk '{sum+=$2} END {print sum}'`
			N_AB_REJ=`cat $TMP_DIR/ip_daily_stats | awk '{sum+=$3} END {print sum}'`
			N_AB_EXE=`cat $TMP_DIR/ip_daily_stats | awk '{sum+=$4} END {print sum}'`
	
			T_AB_EXE=`cat $TMP_DIR/ip_daily_stats | awk '{sum+=$5} END {print sum}'`
			T_QC_EXE=`cat $TMP_DIR/ip_daily_stats | awk '{sum+=$6} END {print sum}'`

			N_SCI_RAW=`cat $TMP_DIR/ip_daily_stats | awk '{sum+=$7} END {print sum}'`
			MB_SCI_RAW=`cat $TMP_DIR/ip_daily_stats | awk '{sum+=$8} END {print sum}'`

# different logics: we sum up these fields in PHOENIX_DAILY, for every day of that month
# column 9 is either the literal placeholder "N_prod" (first run of the month)
# or the accumulated product count from the previous run
			N_PROD=`ls $DFO_CAL_DIR/$DATE/*fits | wc -l`
			PREV_ENTRY=`grep $LAST_DATE $TMP_DIR/ip_daily_stats | awk '{print $9}'`

			if [ Q$PREV_ENTRY != QN_prod ] && [ Q$PREV_ENTRY != Q ]
			then
				N_PROD=`echo $N_PROD $PREV_ENTRY | awk '{print $1+$2}'`
# otherwise we enter $N_PROD
			fi	
			N_PROD_ALL=$N_PROD

			SIZE_PROD=`du -ks     $DFO_CAL_DIR/$DATE/*fits | awk '{sum+=$1} END {print sum/1024.}'`

# column 10 holds the "size_prod" placeholder on the first run; once expanded
# (sed further below) columns 9-12 hold N_prod N_prod_all size_prod size_prod_all.
# NOTE(review): the check reads $10 but the accumulated size is taken from $12
# (size_prod_all) while N_PROD above accumulates $9 (N_prod) -- looks
# asymmetric; verify against the actual PHOENIX_DAILY column layout
			PREV_ENTRY=`grep $LAST_DATE $TMP_DIR/ip_daily_stats | awk '{print $10}'`
			if [ Q$PREV_ENTRY != Qsize_prod ] && [ Q$PREV_ENTRY != Q ]
			then
# otherwise we enter $SIZE_PROD
				PREV_ENTRY=`grep $LAST_DATE $TMP_DIR/ip_daily_stats | awk '{print $12}'`
				SIZE_PROD=`echo $SIZE_PROD $PREV_ENTRY | awk '{print $1+$2}'`
			fi	
			SIZE_PROD_ALL=$SIZE_PROD

			TODAY=`date +%Y-%m-%d`
			MJD=`qcdate $DATE`

# QC1 database: daily_idpstat
			echo "  Updating daily_idpstat for $PROC_INSTRUMENT ..."
			CIVIL_DATE=`cat $TMP_DIR/ip_daily_stats | awk '{print $1}'`
			CIVIL_DATE1=`echo $CIVIL_DATE | sed "s/-//g"`

# build one qc1Ingest call per stats line; the shell values are handed to awk
# via the var=value assignments after the program text
			cat $TMP_DIR/ip_daily_stats |\
			 awk '{print "qc1Ingest -instrume qc -table daily_idpstat -mjd_obs",mjd,"-ingestion_date",today, \
 "-civil_date", civil_date,			\
 "-instrument", proc_instrument,	\
 "-instr_mode", instr_mode, 		\
 "-N_SCI_RAW",$7,		\
 "-MB_SCI_RAW",$8,		\
 "-N_SCI_PRO",n_prod,		\
 "-N_SCI_PRO_ALL",n_prod_all,	\
 "-MB_SCI_PRO",size_prod,	\
 "-MB_SCI_PRO_ALL",size_prod_all, \
 "-N_AB_ALL",$2,		\
 "-N_AB_REJ",$3,		\
 "-N_AB_EXE",$4,		\
 "-T_AB_EXE",$5,		\
 "-T_QC_EXE",$6			\
 }' civil_date=$CIVIL_DATE mjd=$MJD today=$TODAY proc_instrument=$PROC_INSTRUMENT instr_mode=$INSTR_MODE n_prod=$N_PROD n_prod_all=$N_PROD_ALL size_prod=$SIZE_PROD size_prod_all=$SIZE_PROD_ALL> $TMP_DIR/ip_new_entry

# dummy entry for pseudo_date (BWo, 2019-05-08; see Sect. 1.2.3)
			sed -i -e "s/-civil_date/-pseudo_date 2099-99-99 &/" $TMP_DIR/ip_new_entry

# before ingestion, we need to remove the previous entry	
			cat > $TMP_DIR/ip_delete <<EOT
delete 
from 
	qc1..daily_idpstat 
where 
	instrument = "$PROC_INSTRUMENT"
and 
	instr_mode = "$INSTR_MODE"
and 
	civil_date = "$CIVIL_DATE1"
go
EOT
       		 	isql -Uqc -P`cat $QC1_PWD` -S${QC1_SERVER} -i $TMP_DIR/ip_delete | grep -v affected
       		 	echo "  previous entry from $CIVIL_DATE removed."

# ip_new_entry is itself an executable list of qc1Ingest calls; keep exactly
# one backup line per civil_date in backup_daily_qc1Ingest
			chmod u+x $TMP_DIR/ip_new_entry
			$TMP_DIR/ip_new_entry | sed "s/^.*/  &/"
			sed -i -e "/civil_date $CIVIL_DATE/d" $DFO_MON_DIR/backup_daily_qc1Ingest
			cat $TMP_DIR/ip_new_entry >> $DFO_MON_DIR/backup_daily_qc1Ingest

# update the PHOENIX_DAILY monitoring table: the first run replaces the
# "N_prod size_prod" placeholder pair by the four accumulated values and
# appends today's date; later runs rewrite the whole line
			if [ Q$PREV_ENTRY = Qsize_prod ] || [ Q$PREV_ENTRY = Q ]
			then
				sed -i -e "/^$LAST_DATE/s|N_prod[[:space:]]size_prod|$N_PROD	$N_PROD_ALL	$SIZE_PROD	$SIZE_PROD_ALL|" $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE
				sed -i -e "/^$LAST_DATE/s|^.*|&	$TODAY|"	$DFO_MON_DIR/PHOENIX_DAILY_$RELEASE
			else
				NEW_ENTRY=`grep "^$LAST_DATE" $TMP_DIR/ip_daily_stats | awk '{print $1,$2,$3,$4,$5,$6,$7,$8,n_prod,n_prod_all,size_prod,size_prod_all,$13,today}' today=$TODAY n_prod=$N_PROD n_prod_all=$N_PROD_ALL size_prod=$SIZE_PROD size_prod_all=$SIZE_PROD_ALL | sed "s/ /	/g"`
				if [ "Q$NEW_ENTRY" != Q ]
				then
					sed -i -e "/^$LAST_DATE/s|^.*|$NEW_ENTRY|" $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE
				else
					echo "*** WARNING: something went wrong with calculating stats for $LAST_DATE:  not contained in $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE."
				fi
			fi
		else
			echo "*** WARNING: something went wrong with calculating stats for $MONTH: not contained in $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE."
		fi
# no PHOENIX_DAILY table
	else
		N_PROD=`ls $DFO_CAL_DIR/$DATE/*fits | wc -l`
		SIZE_PROD=`du -ks     $DFO_CAL_DIR/$DATE/*fits | awk '{sum+=$1} END {print sum/1024.}'`

		echo "***ERROR: $DFO_MON_DIR/PHOENIX_DAILY_$RELEASE not found!
Ingestion statistics for date $DATE:
Number of ingested MCALIBs:	$N_PROD
Size of ingested MCALIBs:	$SIZE_PROD"
	fi

# =========================================================================
# 2.8.2 Monthly statistics
# =========================================================================
	echo "  Updating monthly_idpstat for $PROC_INSTRUMENT ..."
# aggregation month is the month of TODAY (ingestion date), not of $DATE
	FIRST_DATE1=`echo $TODAY | cut -c1-7`
	MJD=`qcdate ${FIRST_DATE1}-01`

# sum this month's daily_idpstat rows for this instrument/mode (MB -> GB).
# NOTE(review): matching convert(varchar(7),ingestion_date,112) against the
# dashed "YYYY-MM" string appears to rely on ingestion_date being stored as
# "YYYY-MM-DD" text (so the style argument is moot) -- verify on the DB side
	cat > $TMP_DIR/idp_query <<EOT
select distinct
	"${FIRST_DATE1}-01",
 	instrument, 
	instr_mode,
	sum(N_SCI_RAW),
	sum(MB_SCI_RAW)/1024.,
	sum(N_SCI_PRO),
	sum(N_SCI_PRO_ALL),
	sum(MB_SCI_PRO)/1024.,
	sum(MB_SCI_PRO_ALL)/1024.,
	sum(N_AB_ALL),
	sum(N_AB_REJ),
	sum(N_AB_EXE),
	sum(T_AB_EXE)/3600.,
	sum(T_QC_EXE)/3600.
from
	daily_idpstat
where
	instrument = "$PROC_INSTRUMENT"
and
	instr_mode = "$INSTR_MODE"
and
	convert(varchar(7),ingestion_date,112) = "$FIRST_DATE1"	
and
        visible = "Y"
group by
        instr_mode
go
EOT
	rm -f $TMP_DIR/idp_results
# strip isql decoration: "affected" lines, the two header lines, empty lines
	isql -Uqc -P`cat $QC1_PWD` -S${QC1_SERVER} -w999 -i $TMP_DIR/idp_query |  grep -v affected | sed "1,2 d" | sed "/^$/d" > $TMP_DIR/idp_results

# now ingest sums into monthly_idpstat
	if [ -s $TMP_DIR/idp_results ]
	then
		cat $TMP_DIR/idp_results |\
	 awk '{print "qc1Ingest -instrume qc -table monthly_idpstat -mjd_obs",mjd, \
 "-civil_date", $1,             \
 "-instrument", $2,		\
 "-instr_mode", $3,     	\
 "-N_SCI_RAW",$4,               \
 "-GB_SCI_RAW",$5,              \
 "-N_SCI_PRO",$6,    		\
 "-N_SCI_PRO_ALL",$7,  		\
 "-GB_SCI_PRO",$8,  		\
 "-GB_SCI_PRO_ALL",$9,  	\
 "-N_AB_ALL",$10,                \
 "-N_AB_REJ",$11,                \
 "-N_AB_EXE",$12,                \
 "-T_AB_EXE",$13,                \
 "-T_QC_EXE",$14                 \
 }' mjd=$MJD > $TMP_DIR/idp_new_monthly_entry

# the generated file is an executable list of qc1Ingest calls
		chmod u+x $TMP_DIR/idp_new_monthly_entry
		$TMP_DIR/idp_new_monthly_entry | sed "s/^.*/  &/"
	else
		echo "no data found."
	fi

# =========================================================================
# 2.8.3 Monthly stats for ALL_INS_SUM
# =========================================================================
	echo "  Updating monthly_idpstat for ALL_INS_SUM ..."
# dashless first-of-month date, e.g. 20240501, matching the stored civil_date format
	FIRST_DATE2=`echo "${FIRST_DATE1}-01" | sed "s/-//"g`
# sum this month's per-instrument monthly rows into one ALL_INS_SUM row
	cat > $TMP_DIR/idp_query_all <<EOT
select distinct
	"$FIRST_DATE2",
	"ALL_INS_SUM",
	"ALL_INS_SUM",
	sum(N_SCI_RAW),
	sum(GB_SCI_RAW),
	sum(N_SCI_PRO),
	sum(N_SCI_PRO_ALL),
	sum(GB_SCI_PRO),
	sum(GB_SCI_PRO_ALL),
	sum(N_AB_ALL),
	sum(N_AB_REJ),
	sum(N_AB_EXE),
	sum(T_AB_EXE),
	sum(T_QC_EXE)
from
	monthly_idpstat
where
	instrument != "ALL_INS_SUM"
and
	instr_mode != "ALL_INS_SUM"
and
	convert(varchar(10),civil_date,112) = "$FIRST_DATE2"	
and
        visible = "Y"
go
EOT

	rm -f $TMP_DIR/idp_results_all
# strip isql decoration as in 2.8.2
	isql -Uqc -P`cat $QC1_PWD` -S${QC1_SERVER} -w999 -i $TMP_DIR/idp_query_all |  grep -v affected | sed "1,2 d" | sed "/^$/d" > $TMP_DIR/idp_results_all

# ingest updated sum into monthly_idpstat
	if [ -s $TMP_DIR/idp_results_all ]
	then
		cat $TMP_DIR/idp_results_all |\
	 awk '{print "qc1Ingest -instrume qc -table monthly_idpstat -mjd_obs",mjd, \
 "-civil_date", $1,             \
 "-instrument", "ALL_INS_SUM",	\
 "-instr_mode", "ALL_INS_SUM",  	\
 "-N_SCI_RAW",$4,               \
 "-GB_SCI_RAW",$5,              \
 "-N_SCI_PRO",$6,    		\
 "-N_SCI_PRO_ALL",$7,  		\
 "-GB_SCI_PRO",$8,  		\
 "-GB_SCI_PRO_ALL",$9,  	\
 "-N_AB_ALL",$10,                \
 "-N_AB_REJ",$11,                \
 "-N_AB_EXE",$12,                \
 "-T_AB_EXE",$13,                \
 "-T_QC_EXE",$14                 \
 }' mjd=$MJD > $TMP_DIR/idp_new_monthly_entry

		chmod u+x $TMP_DIR/idp_new_monthly_entry
		$TMP_DIR/idp_new_monthly_entry | sed "s/^.*/  &/"
	fi
fi

# =========================================================================
# 3. update QC1 database
# 3.1 set QC1_UPDATE
# =========================================================================

# Read the QC1_UPDATE switch from the configuration file; anything other
# than an explicit YES or NO (including a missing entry) falls back to YES.
QC1_UPDATE=`grep ^QC1_UPDATE $DFO_CONFIG_DIR/config.ingestProducts | awk '{print $2}'`
case "$QC1_UPDATE" in
	YES|NO)	;;
	*)	QC1_UPDATE=YES ;;
esac

if [ "$QC1_UPDATE" = "NO" ]
then
	echo "*** INFO: QC1_UPDATE=${QC1_UPDATE}. No update of cdbfile columns in QC1 database."
fi

# =========================================================================
# 3.2 set cdbfile columns
# =========================================================================

if [ "$QC1_UPDATE" = "YES" ] && [ "$PRODUCTS_YN" = "YES" ]
then
	echo
	echo "Now updating QC1 database ..."

	rm -f $TMP_DIR/${UPD_LOG}

# search table names in QC1 database
	rm -f $TMP_DIR/ip_qc1tables

	if [ $THIS_IS_PHOENIX = YES ]
	then
		rm -f $TMP_DIR/ip_qc1tables
# PHOENIX: prefer explicitly configured QC1_TABLE entries; otherwise fall
# back to the operational table list reported by qc1Ingest (comma-separated;
# tr "\012" splits it into one table name per line)
		grep "^QC1_TABLE" $DFO_CONFIG_DIR/config.ingestProducts | awk '{print $2}' > $TMP_DIR/ip_qc1tables
		if [ ! -s $TMP_DIR/ip_qc1tables ]
		then
			echo "*** INFO: no specific QC1 tables configured, we take the same list as for operations ..."
			qc1Ingest -instrume $QC1_INSTRUMENT | sed "1,6 d" | tr "," "\012" | awk '{print $1}' > $TMP_DIR/ip_qc1tables
		else
			echo "*** INFO: we take the list of configured QC1 tables ..."
		fi	
	else
		qc1Ingest -instrume $QC1_INSTRUMENT | sed "1,6 d" | tr "," "\012" | awk '{print $1}' > $TMP_DIR/ip_qc1tables
	fi

	if [ ! -s $TMP_DIR/ip_qc1tables ]
	then
		echo "*** WARNING: No tables for instrument $QC1_INSTRUMENT found in QC1 database."
		echo "             Update of QC1 database not possible."
	else

# define root for SQL calls
		rm -f $TMP_DIR/ip_query_root
		echo "use qc1" > $TMP_DIR/ip_query_root
		echo "go" >> $TMP_DIR/ip_query_root

# search for tables that have pipefile column
		rm -f $TMP_DIR/ip_query
		cat $TMP_DIR/ip_query_root > $TMP_DIR/ip_query
		for TABLE in `cat $TMP_DIR/ip_qc1tables`
		do
			echo "sp_columns $TABLE" >> $TMP_DIR/ip_query
			echo "go" >> $TMP_DIR/ip_query
		done

		isql -Uqc -P`cat $QC1_PWD` -S${QC1_SERVER} -w999 -i $TMP_DIR/ip_query -o $TMP_DIR/ip_table_columns

		rm -f $TMP_DIR/ip_pipefile_tables
		grep pipefile $TMP_DIR/ip_table_columns | awk '{print $3}' > $TMP_DIR/ip_pipefile_tables
		if [ ! -s $TMP_DIR/ip_pipefile_tables ]
		then
			echo "*** WARNING: No QC1 tables with pipefile column found."
			echo "             Update of QC1 database not possible."
		else

# find correct table name for each product file 
# (one probe query per product x table; the "&<table>" marker printed before
# each result is used further below to fold the output back per table)

			cat $TMP_DIR/ip_query_root > $TMP_DIR/ip_query
			for F in $FILE_LIST
			do
# column 3 of ip_file_list is the PIPEFILE header value for this product
				PIPEFILE=`grep $F $TMP_DIR/ip_file_list | awk '{print $3}'`
				if [ "Q$PIPEFILE" = "Q" ]
				then
					echo "*** WARNING: PIPEFILE header key not defined for $F"
					continue
				fi

				for TABLE in `cat $TMP_DIR/ip_pipefile_tables`
				do
					cat >> $TMP_DIR/ip_query <<EOT
print "&$TABLE"
select pipefile
from $TABLE
where pipefile = "$PIPEFILE"
and visible = "Y" 
go
EOT
				done
			done

			isql -Uqc -P`cat $QC1_PWD` -S${QC1_SERVER} -w999 -i $TMP_DIR/ip_query -o $TMP_DIR/ip_pipefile_search

# flatten the isql output so that each "&"-marked probe becomes one line,
# then keep only lines reporting a non-zero "... row(s) affected" count
			grep -v -e "----" $TMP_DIR/ip_pipefile_search | grep -v pipefile | tr -d "\n" | tr "&" "\n" | grep -v "0 rows affected" | grep row | grep affected > $TMP_DIR/ip_search_result
			echo >> $TMP_DIR/ip_search_result

# prepare update for each calib file
			cat $TMP_DIR/ip_query_root > $TMP_DIR/ip_update
			rm -f $TMP_DIR/ip_log1
			touch $TMP_DIR/ip_log1
			for F in $FILE_LIST
			do
				PIPEFILE=`grep $F $TMP_DIR/ip_file_list | awk '{print $3}'`
				if [ "Q$PIPEFILE" = "Q" ]
				then
					continue
				fi

				TABLE_LIST=`grep $PIPEFILE $TMP_DIR/ip_search_result | awk '{print $1}'`
				if [ "Q$TABLE_LIST" = "Q" ]
				then
# this is legal if a recipe produces more than one product that survives
					echo "*** INFO: no QC1 table found for $F (${PIPEFILE})" >> $TMP_DIR/ip_log1
					continue
				fi

				for TABLE in $TABLE_LIST
				do
					cat >> $TMP_DIR/ip_update <<EOT
print "Update $TABLE with cdbfile = $F where pipefile = ${PIPEFILE}"
update $TABLE
set cdbfile = "$F" 
where pipefile = "$PIPEFILE"
and visible = "Y"
go
EOT
				done
			done

# execute update
			rm -f $TMP_DIR/ip_log2
			isql -Uqc -P`cat $QC1_PWD` -S${QC1_SERVER} -w999 -i $TMP_DIR/ip_update -o $TMP_DIR/ip_log2
			cat $TMP_DIR/ip_log1 $TMP_DIR/ip_log2 > $TMP_DIR/${UPD_LOG}
		fi
	fi
fi

# =========================================================================
# 3.3 copy log, check for errors
# =========================================================================

if [ "$QC1_UPDATE" = "YES" ] && [ "$PRODUCTS_YN" = "YES" ]
then
	if [ -s $TMP_DIR/${UPD_LOG} ]
	then
# rows affected
# total number of updated rows, summed over all "(n rows affected)" messages
		NUM_ROWS=`grep affected $TMP_DIR/${UPD_LOG} | sed "s/(//g" | awk '{sum+=$1} END {print sum}'`

# error check
# any line that is neither an "affected" count, an Update echo, nor an INFO
# message counts as a potential error
		rm -f $TMP_DIR/ip_check
		grep -v affected $TMP_DIR/${UPD_LOG} | grep -v Update | grep -v "*** INFO" > $TMP_DIR/ip_check
		if [ -s $TMP_DIR/ip_check ]
		then
			mv $TMP_DIR/${UPD_LOG} $DFO_LST_DIR/
			echo "... updating finished. $NUM_ROWS row(s) affected. An ERROR may have occurred. Check $DFO_LST_DIR/${UPD_LOG} ." | tee -a $DFO_LST_DIR/${UPD_LOG}
			if [ "$SEND_MAIL" = "YES" ]
			then
				echo "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: an ERROR may have occurred during update of QC1 database on ${DATE}. Check $DFO_LST_DIR/${UPD_LOG} ." > $TMP_DIR/ip_mail
				mail -s "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: ERROR during update of QC1 database on ${DATE}" $OP_ADDRESS < $TMP_DIR/ip_mail
			fi
		else
			echo "... updating finished. $NUM_ROWS row(s) affected. No ERROR has occurred."
		fi
	else
		echo "... update of QC1 database UNSUCCESSFUL. No log file produced!"
		if [ "$SEND_MAIL" = "YES" ]
		then
			echo "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: update of QC1 database UNSUCCESSFUL on ${DATE}. No log file produced!" > $TMP_DIR/ip_mail
			mail -s "ingestProducts for ${ACCOUNT}@${DFO_MACHINE}: update of QC1 database UNSUCCESSFUL on ${DATE}" $OP_ADDRESS < $TMP_DIR/ip_mail
		fi
	fi
fi

# =========================================================================
# 4.  clean up, set DFO flag and exit
# 4.1 clean up entry in ING_FILE_NAME
# =========================================================================

# Drop this run's own call ("ingestProducts -m $MODE -d $DATE") from the
# ingestion job file so it is not executed again; skip silently when the
# job file is absent or empty.
[ -s $DFO_JOB_DIR/$ING_FILE_NAME ] && \
	sed -i -e "/ingestProducts -m $MODE -d $DATE/d" $DFO_JOB_DIR/$ING_FILE_NAME

# =========================================================================
# 4.2 Set DFO flag
# =========================================================================

# Record the ingestion status for $DATE in the DFO status file.
UPDATE=`date +%Y-%m-%d"T"%H:%M:%S`

case $MODE in
        "CALIB" )       DFO_STATUS="cal_Ingested" ;;
        "SCIENCE" )     DFO_STATUS="sci_Ingested" ;;
        * )             DFO_STATUS="" ;;
esac

# guard: an unrecognized $MODE previously appended a malformed status line
# with an empty flag field; now nothing is written in that case
if [ "Q$DFO_STATUS" != "Q" ]
then
	echo "$DFO_STATUS $DATE $UPDATE" >> $DFO_MON_DIR/DFO_STATUS
fi

# =========================================================================
# 4.3 If CALIB: write cleanupProducts call into JOB file
# =========================================================================

if [ $MODE = "CALIB" ]
then
# create the cleanup jobs file with its header on first use, and make it executable
	if [ ! -s $DFO_JOB_DIR/JOBS_CLEANUP ]
	then
		cat > $DFO_JOB_DIR/JOBS_CLEANUP <<EOT
#!/bin/sh
# This is the jobs file for cleanup products (after successful run of ingestProducts)

EOT
		chmod u+x $DFO_JOB_DIR/JOBS_CLEANUP
	fi
# append the cleanupProducts call for $DATE only once (skip if already listed)
	if ! grep "cleanupProducts -d $DATE" $DFO_JOB_DIR/JOBS_CLEANUP > /dev/null
	then
		echo "cleanupProducts -d $DATE" >> $DFO_JOB_DIR/JOBS_CLEANUP
	fi
fi

# remove the fitsverify binary from $DFO_BIN_DIR (presumably placed there
# earlier in this script -- not visible in this section) and finish cleanly
rm -f $DFO_BIN_DIR/fitsverify
exit 0
