# Daily log of how many messages have been received and lost.
# Two lines are logged: first from the IRD, then from 'receiverd', e.g.:
# "msgstats: ird ..."
# "msgstats: 0 0 0 0 tmo(0) !reord(0) exec0(0) compl(0) recvd(0) int(0) epmm(2.0) epgb(0)"

# imports duplicated from 'hc_monitor.py' to enable standalone script testing:
#import sys
#from twccommon import Log
#from twccommon import IOCatcher

# script-specific imports
import string
import time
import traceback
import twc.dsmarshal as dsm
from twc.Jobs import Job
import twccommon.corba
import twccommon.corba.TWCCore as TWCCore
from omniORB import CORBA
import twc.IrdInterface


class MsgStatsJob(Job):

    def __init__(self, jobName):
        Job.__init__(self, "MsgStats")
        self.jobName = jobName


    def getParams(self):
        return "processName = %s"  % (self.jobName,)


    def execute(self):
        """Placeholder entry point; overridden by MsgStats and IrdMsgStats."""
        pass


    def computeEpmm(self, dVals):
        """Return errors per million received messages, excluding exec errors."""
        errCnt = 0
        errors = dVals[0:6]     # the four bug-class errors, plus tmo and !reord
        msgCnt = int(dVals[8])  # total received-msg count

        for err in errors:
            errCnt += int(err)

        if msgCnt == 0:
            return 0
        else:
            return (errCnt * 1000000.0) / msgCnt


class MsgStats(MsgStatsJob):

    def __init__(self, procName, portName):
        self.oldValues   = 10 * [0L]     # initialize all 10 receiverd mon-tgts
        self.orb = twccommon.corba.getOrb()
        self.portName = portName
        MsgStatsJob.__init__(self, procName)
        
    
    def queryReceiverd(self):
        try:
            monTarget = self.orb.string_to_object( \
                   'corbaname::localhost:4000#MonitorTargets/%s' %
                   self.jobName)

            listOfStrings = monTarget.getValues(
                      ['%s_BadPktHdrLength' % self.portName,   \
                       '%s_BadPktHdrProtocol' % self.portName, \
                       '%s_NoneMsgHdrType' % self.portName,    \
                       '%s_BadMsgHdrType' % self.portName,     \
                       '%s_MsgTimeout' % self.portName,        \
                       '%s_MsgQueueExceeded' % self.portName,  \
                       '%s_BadExecMsgs' % self.portName,       \
                       '%s_CompletedMsgs' % self.portName,     \
                       '%s_ReceivedMsgs' % self.portName,      \
                       'all_InterestingMsgs'])
        except:
            Log.critical("queryReceiverd exception %s" % self.jobName)
            maxTracebackDepth = 3
            writeToSyslog = IOCatcher.IOLogger(Log.critical)
            traceback.print_exc(maxTracebackDepth, writeToSyslog)
            return None

        listOfIntegers = map(string.atoi, listOfStrings)
        return listOfIntegers
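
    # Note on ordering: the list returned by queryReceiverd is the order assumed
    # by computeEpmm and by the abbreviation decoder in execute(): indices 0-3
    # are the four bug-class errors, 4 is tmo, 5 is !reord, 6 is !exec (exec0),
    # 7 is compl, 8 is recvd, and 9 is int.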

    def execute(self):
        """entry point from Job"""
        # Schedule to run at the top of the next hour
        (yr, mth, day, hr, min, sec, wkday, doy, dst) = time.localtime(time.time())
        offset = 3600 - ((min * 60) + sec)
        if offset < 60:
            offset = 60
        Log.info("MsgStats re-scheduling to run next in %d sec" % offset)
        self.scheduleIn(offset)
        self.reschedule()  # resched now, so next pass will always run

        qValues  = self.queryReceiverd()
        if qValues is None:
            Log.warning('job_monitor_msgstats ran, but Receiverd query failed')
            return

        ii = 0
        reset = 0
        deltaValues = 10 * [0L]

        for vv in qValues:
            deltaValues[ii] = vv - self.oldValues[ii]
            if vv < self.oldValues[ii]:
                reset = 1
            ii = ii + 1

        self.oldValues = qValues

        if reset:
            # A counter went backwards (e.g. receiverd restarted), so this
            # hour's deltas are meaningless; skip the stats line for this pass.
            Log.info('msgStats: Counters have been reset')
            return

        #             - - - Abbreviation Decoder - - -
        # 0 0 0 0 = Four bug-class error counts.  Will be zero unless a host or
        #           istar bug gets released.  (data loss cannot cause these).
        # tmo     = Msg timeouts, waiting for the next pkt in a single msg.
        # !reord  = Msgs that can't be reordered by the current depth of the
        #           reorder queue (includes missing pkts that never arrive from
        #           the middle of a msg, unless the whole msg times-out first).
        # !exec   = Msgs containing a non-Interest segment with an exec failure
        #           (logged as 'exec0').  Does not count Interest segment
        #           (seg 0) evals of '0'.
        # compl   = Total count of completely received msgs (whether
        #           interesting or not).
        # recvd   = Cnt of all msg headers seen, even if the rest of the msg
        #           was not completely received.
        # int     = Msgs that this particular Star found 'interesting'.
        # epmm    = 'errors per million messages'. Does not include '!exec'
        #           errors, since it is trying to measure satellite data loss.
        #           For epmm, a 1GB msg counts the same as a 100 byte msg.

        # TODO
        # epgb    = 'errors per gigabyte'.  Assumes all cmd msgs are 100 bytes,
        #           but does count the actual bytes in a file transfer.
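        #
        # Worked example for epmm (hypothetical delta counts, in queryReceiverd
        # order): for [1, 0, 0, 0, 2, 0, 5, 900000, 1000000, 400000] the six
        # non-exec error counts sum to 3 over 1,000,000 received msgs, so
        # computeEpmm() returns 3.0; the 5 exec failures (index 6) are excluded.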

        epmm = self.computeEpmm(deltaValues) 
        epgb = 0
        results = (self.jobName,) + tuple(deltaValues) + (epmm, epgb)
        msg = "%s: %d %d %d %d tmo(%d) !reord(%d) exec0(%d) " \
              "compl(%d) recvd(%d) int(%d) epmm(%3.1f) epgb(%d)" % results
        Log.info(msg)


class IrdMsgStats(MsgStatsJob):

    def __init__(self):
        MsgStatsJob.__init__(self, 'IRD')


    def execute(self):
        """entry point from Job"""
        # Schedule to run at the top of the next hour
        (yr, mth, day, hr, min, sec, wkday, doy, dst) = time.localtime(time.time())
        offset = 3600 - ((min * 60) + sec)
        if offset < 60:
            offset = 60
        Log.info("IrdMsgStats re-scheduling to run next in %d sec" % offset)
        self.scheduleIn(offset)
        self.reschedule()  # resched now, so next pass will always run

        dsm.init()

        try:
            irdOutput = twc.IrdInterface.getSignalStats()
            Log.info(irdOutput)
        except Exception, e:
            Log.warning('IRD SignalStat SNMP query failed: %s' % (e,))

        dsm.uninit()

    
# Schedule to run at the top of the next hour
(yr, mth, day, hr, min, sec, wkday, doy, dst) = time.localtime(time.time())
offset = 3600 - ((min * 60) + sec)
if offset < 60:
    offset = 60
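
# Worked example of the offset arithmetic (hypothetical clock time): at 10:37:20
# local time, offset = 3600 - (37*60 + 20) = 1360 sec, so the first run lands at
# the top of the next hour; the 60-second floor presumably just avoids an almost
# immediate run when the script starts right at the hour boundary.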

Log.info("MsgStats scheduling to run next in %d sec" % offset)
Log.info("IrdMsgStats scheduling to run next in %d sec" % offset)

# (the args give the monitor-target process name and the port-name prefix)
MsgStats("receiverd", "host").scheduleIn(offset)
MsgStats("receiverdPri", "hostPri").scheduleIn(offset)
IrdMsgStats().scheduleIn(offset)
