Use RLTT and schedule_stop in thermal calculation #344

Merged 7 commits on May 18, 2020
151 changes: 100 additions & 51 deletions starcheck/calc_ccd_temps.py
@@ -18,6 +18,7 @@
import numpy as np
import json
import yaml
from pathlib import Path

# Matplotlib setup
# Use Agg backend for command-line (non-interactive) operation
@@ -33,6 +34,7 @@
import Ska.DBI
import Ska.engarchive.fetch_sci as fetch
from Chandra.Time import DateTime
import kadi
import kadi.commands
import kadi.commands.states as kadi_states
import xija
@@ -97,7 +99,7 @@ def get_options():


def get_ccd_temps(oflsdir, outdir='out',
json_obsids=sys.stdin,
json_obsids=None,
model_spec=None, char_file=None, orlist=None,
run_start_time=None,
verbose=1, **kwargs):
@@ -109,23 +111,34 @@ def get_ccd_temps(oflsdir, outdir='out',
:param oflsdir: products directory
:param outdir: output directory for plots
:param json_obsids: file-like object or string containing JSON of
starcheck Obsid objects
:param model_spec: xija ACA model specification
:param run_start_time: Chandra.Time date used as a reference time to determine initial
seed state with temperature telemetry. The initial seed state will
be at the end of available telemetry that is also before run_start_time
and before the beginning of backstop cmds.
starcheck Obsid objects (default='<oflsdir>/starcheck/obsids.json')
:param model_spec: xija ACA model spec file (default=package aca_spec.json)
:param char_file: starcheck characteristics file (default=package characteristics.yaml)
:param run_start_time: Chandra.Time date, clock time when starcheck was run,
or a user-provided value (usually for regression testing).
:param verbose: Verbosity (0=quiet, 1=normal, 2=debug)
:param kwargs: extra args, including test_rltt and test_sched_stop for testing

:returns: JSON dictionary of labeled dwell intervals with max temperatures
"""
if not os.path.exists(outdir):
os.mkdir(outdir)

module_dir = Path(__file__).parent
if model_spec is None:
model_spec = str(module_dir / 'data' / 'aca_spec.json')
if char_file is None:
char_file = str(module_dir / 'data' / 'characteristics.yaml')

if json_obsids is None:
# Only happens in testing, so use existing obsids file in OFLS dir
json_obsids = Path(oflsdir, 'starcheck', 'obsids.json').read_text()
Contributor: I suppose if one uses a local oflsdir one would not need to presume that the tested changes in local starcheck weren't causing diffs in the obsids.json.

Member Author: I don't understand this.

Contributor: If we were to change something about the timing of the obsids (saved in that obsids.json file) with a change in starcheck, the new 'test code' here in calc_ccd_temps.py probably wouldn't get the right answer to test a new starcheck release candidate if you use /data/mpcrit1/mplogs oflsdirs for input. I think I was saying that your test fixes in calc_ccd_temps.py are great, but they don't solve all integration testing issues with the Perl pieces of starcheck.pl.


run_start_time = DateTime(run_start_time)
config_logging(outdir, verbose)

# Store info relevant to processing for use in outputs
proc = {'run_user': os.environ['USER'],
proc = {'run_user': os.environ.get('USER'),
'execution_time': time.ctime(),
'run_start_time': run_start_time,
'errors': []}
@@ -135,6 +148,7 @@ def get_ccd_temps(oflsdir, outdir='out',
% (TASK_NAME, proc['execution_time'], proc['run_user']))
logger.info("# Continuity run_start_time = {}".format(run_start_time.date))
logger.info('# {} version = {}'.format(TASK_NAME, VERSION))
logger.info(f'# kadi version = {kadi.__version__}')
logger.info('###############################'
'######################################\n')

@@ -150,41 +164,70 @@ def get_ccd_temps(oflsdir, outdir='out',
except TypeError:
sc_obsids = json.load(json_obsids)

# Get tstart, tstop, commands from backstop file in opt.oflsdir
# Get commands from backstop file in oflsdir
bs_cmds = get_bs_cmds(oflsdir)
tstart = DateTime(bs_cmds[0]['date']).secs
tstop = DateTime(bs_cmds[-1]['date']).secs
proc['datestart'] = DateTime(tstart).date
proc['datestop'] = DateTime(tstop).date

# Get temperature telemetry for 1 days prior to
# min(last available telem, backstop tstart, run_start_time)
# where run_start_time is for regression testing.
end_time = fetch.get_time_range('aacccdpt', format='secs')[1]
tlm = get_telem_values(min(end_time, tstart, run_start_time.secs),
['aacccdpt'],
days=1)

states = get_week_states(tstart, tstop, bs_cmds, tlm)

# If the last obsid interval extends over the end of states
# extend the state / predictions
if ((states[-1]['obsid'] == sc_obsids[-1]['obsid']) &
(sc_obsids[-1]['obs_tstop'] > states[-1]['tstop'])):
tstop = sc_obsids[-1]['obs_tstop']
states[-1]['tstop'] = sc_obsids[-1]['obs_tstop']
states[-1]['datestop'] = DateTime(sc_obsids[-1]['obs_tstop']).date

if tstart > DateTime(MODEL_VALID_FROM).secs:
times, ccd_temp = make_week_predict(model_spec, states, tstop)
bs_dates = bs_cmds['date']

# Running loads termination time is the last time of "current running loads"
# (or in the case of a safing action, "current approved load commands" in
# kadi commands) which should be included in propagation. Starting from
# around 2020-April this is included as a command in the loads, while prior
# to that we just use the first command in the backstop loads.
ok = bs_cmds['event_type'] == 'RUNNING_LOAD_TERMINATION_TIME'
rltt = DateTime(bs_dates[ok][0] if np.any(ok) else bs_dates[0])

# First actual command in backstop loads (all the NOT-RLTT commands)
bs_start = DateTime(bs_dates[~ok][0])

# Scheduled stop time is the end of propagation, either the explicit
# time as a pseudo-command in the loads or the last backstop command time.
ok = bs_cmds['event_type'] == 'SCHEDULED_STOP_TIME'
sched_stop = DateTime(bs_dates[ok][0] if np.any(ok) else bs_dates[-1])

if 'test_rltt' in kwargs:
rltt = DateTime(kwargs['test_rltt'])
if 'test_sched_stop' in kwargs:
sched_stop = DateTime(kwargs['test_sched_stop'])

logger.info(f'RLTT = {rltt.date}')
logger.info(f'sched_stop = {sched_stop.date}')

proc['datestart'] = bs_start.date
proc['datestop'] = sched_stop.date

# Get temperature telemetry for 1 day prior to min(last available telem,
# backstop start, run_start_time) where run_start_time is for regression
# testing.
tlm_end_time = min(fetch.get_time_range('aacccdpt', format='secs')[1],
bs_start.secs, run_start_time.secs)
tlm = get_telem_values(tlm_end_time, ['aacccdpt'], days=1)
states = get_week_states(rltt, sched_stop, bs_cmds, tlm)

# If the last obsid interval extends over the end of states then extend the
# state / predictions. In the absence of something useful like
# SCHEDULED_STOP, if the schedule ends in NPNT (and has no maneuver in
# backstop to define end time), the obsid stop time for the last observation
# in the schedule might be set from the stop time listed in the processing
# summary. Going forward from backstop 6.9 this clause is likely not being
# run.
last_state = states[-1]
last_sc_obsid = sc_obsids[-1]
Member Author: I don't understand the circumstances where starcheck would make an obsid interval that extends beyond the end of commanding. With SCHEDULED_STOP in backstop this bit is not needed, but wondering if it is needed for regression testing or whatnot.

Contributor: IIRC in the absence of something useful like SCHEDULED_STOP, if the schedule ended in NPNT (and had no maneuver in backstop to define end time), the obsid stop time for the last observation in the schedule was sometimes set from the stop time listed in the processing summary? Maybe?

I remember spending a lot of time on corner cases to get it to not just fail out. Should be moot if we can do a modern hopper transition and better state building, but there you go.

Member Author: I added your text as a comment to the code for our future selves.

if ((last_state['obsid'] == last_sc_obsid['obsid']) &
(last_sc_obsid['obs_tstop'] > last_state['tstop'])):
obs_tstop = last_sc_obsid['obs_tstop']
last_state['tstop'] = obs_tstop
last_state['datestop'] = DateTime(obs_tstop).date

if rltt.date > DateTime(MODEL_VALID_FROM).date:
ccd_times, ccd_temps = make_week_predict(model_spec, states, sched_stop)
else:
times, ccd_temp = mock_telem_predict(states)
ccd_times, ccd_temps = mock_telem_predict(states)

make_check_plots(outdir, states, times,
ccd_temp, tstart, tstop, char=char)
make_check_plots(outdir, states, ccd_times, ccd_temps,
tstart=bs_start.secs, tstop=sched_stop.secs, char=char)
intervals = get_obs_intervals(sc_obsids)
obsreqs = None if orlist is None else {obs['obsid']: obs for obs in read_or_list(orlist)}
obstemps = get_interval_data(intervals, times, ccd_temp, obsreqs)
obstemps = get_interval_data(intervals, ccd_times, ccd_temps, obsreqs)
return json.dumps(obstemps, sort_keys=True, indent=4,
cls=NumpyAwareJSONEncoder)
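
For reference, a minimal usage sketch of the updated entry point. The products directory and dates below are hypothetical placeholders, and test_rltt / test_sched_stop are the regression-testing hooks mentioned in the docstring above, not values passed in normal operation:

from starcheck.calc_ccd_temps import get_ccd_temps

# Paths and dates are placeholders for illustration only
obstemps_json = get_ccd_temps(
    '/path/to/oflsdir',                    # products dir with backstop and starcheck/obsids.json
    outdir='out',
    run_start_time='2020:130:00:00:00',    # reference time for the telemetry seed state
    test_rltt='2020:131:03:00:00',         # override RLTT (testing only)
    test_sched_stop='2020:138:02:00:00',   # override scheduled stop time (testing only)
)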

@@ -290,32 +333,33 @@ def calc_model(model_spec, states, start, stop, aacccdpt=None, aacccdpt_times=No
return model


def get_week_states(tstart, tstop, bs_cmds, tlm):
def get_week_states(rltt, sched_stop, bs_cmds, tlm):
"""
Make states from last available telemetry through the end of the backstop commands
Make states from last available telemetry through the end of the schedule

:param tstart: start time from first backstop command
:param tstop: stop time from last backstop command
:param rltt: running load termination time (discard running load commands after rltt)
:param sched_stop: create states out through scheduled stop time
:param bs_cmds: backstop commands for products under review
:param tlm: available pitch and aacccdpt telemetry recarray from fetch
:returns: numpy recarray of states
"""
# Get temperature data at the end of available telemetry
ok = tlm['time'] > tlm['time'][-1] - 1400
init_aacccdpt = np.mean(tlm['aacccdpt'][ok])
init_tlm_time = np.mean(tlm['time'][ok])
times = tlm['time']
i0 = np.searchsorted(times, times[-1] - 1400)
init_aacccdpt = np.mean(tlm['aacccdpt'][i0:])
init_tlm_time = np.mean(tlm['time'][i0:])

# Get commands from last telemetry up to (but not including)
# first backstop command.
cmds = kadi.commands.get_cmds(init_tlm_time, tstart)
# Get currently running (or approved) commands from last telemetry up to
# and including commands at RLTT
cmds = kadi.commands.get_cmds(init_tlm_time, rltt, inclusive_stop=True)

# Add in the backstop commands
cmds = cmds.add_cmds(bs_cmds)

# Get the states for available commands. This automatically gets continuity.
state_keys = ['obsid', 'pitch', 'q1', 'q2', 'q3', 'q4', 'eclipse']
states = kadi_states.get_states(cmds=cmds, state_keys=state_keys,
merge_identical=True)
states = kadi_states.get_states(cmds=cmds, start=init_tlm_time, stop=sched_stop,
Contributor (@jeanconn, May 15, 2020): I had been thinking of the get_states changes as just a convenience. Here, now that start is passed, it looks like the first returned state in the (propagation interval of the) test load I'm viewing (JAN117A) now starts 15 minutes earlier, at init_tlm_time instead of the time of cmds[0]. I think that is totally fine for starcheck's thermal model, but I wasn't expecting the change.

Member Author: More than fine, it is now correct. Previously the propagation temperature from available telemetry was being applied starting at the wrong time. In most circumstances it didn't really matter, but it is theoretically possible that the first command could be many hours after init_tlm_time, e.g. during a long observation between comms.

Contributor: That was my thinking as well (that it is more correct), though I figured it was worth calling out in discussion in the PR as it is a functional change that was not mentioned in the PR top matter.
state_keys=state_keys, merge_identical=True)

states['tstart'] = DateTime(states['datestart']).secs
states['tstop'] = DateTime(states['datestop']).secs
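
A small sketch of the behavioral change discussed in the thread above (dates are illustrative; the kadi calls follow the signatures used in this diff). With an explicit start, the first returned state begins at the telemetry seed time rather than at the date of the first command:

import kadi.commands
import kadi.commands.states as kadi_states

state_keys = ['obsid', 'pitch', 'q1', 'q2', 'q3', 'q4', 'eclipse']
cmds = kadi.commands.get_cmds('2020:010:00:00:00', '2020:017:00:00:00')

# Old call: first state starts at the date of cmds[0]
states_old = kadi_states.get_states(cmds=cmds, state_keys=state_keys,
                                    merge_identical=True)

# New call: states are bounded by the explicit start (init_tlm_time) and
# stop (sched_stop), so propagation begins at the telemetry seed time
states_new = kadi_states.get_states(cmds=cmds, start='2020:009:22:00:00',
                                    stop='2020:017:12:00:00',
                                    state_keys=state_keys, merge_identical=True)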
@@ -435,6 +479,10 @@ def emit(self, record):
logger = logging.getLogger(TASK_NAME)
logger.setLevel(loglevel)

# Remove existing handlers if calc_ccd_temps is called multiple times
for handler in list(logger.handlers):
logger.removeHandler(handler)

formatter = logging.Formatter('%(message)s')

console = logging.StreamHandler()
@@ -507,7 +555,8 @@ def make_check_plots(outdir, states, times, temps, tstart, tstop, char):
:param states: commanded states
:param times: time stamps (sec) for temperature arrays
:param temps: dict of temperatures
:param tstart: load start time
:param tstart: load start time (secs)
:param tstop: schedule stop time (secs)
:rtype: dict of review information including plot file names
"""
plots = {}
6 changes: 6 additions & 0 deletions starcheck/src/starcheck.pl
@@ -403,7 +403,13 @@
} else { warning("Could not find Maneuver Error file in output/ directory\n") };


# Get an initial dither state from kadi. Dither states are then built from backstop commands
# after this time. If the running loads will be terminated in advance of new commands in the loads
# in review, and the RUNNING_LOAD_TERMINATION_TIME backstop "pseudo" command is available, that
# command will be the first command ($bs[0]) and the kadi dither state will be fetched at that time.
# This is expected and appropriate.
my $kadi_dither = get_dither_kadi_state($bs[0]->{date});

# Read DITHER history file and backstop to determine expected dither state
my ($dither_error, $dither) = Ska::Parse_CM_File::dither($dither_file, \@bs, $kadi_dither);
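
For context, a hedged Python-side sketch of what a helper like get_dither_kadi_state conceptually does. The kadi get_continuity call and the dither state-key names are assumptions based on kadi commanded states, not taken from this diff; the actual helper lives in starcheck's Perl/Python bridge.

import kadi.commands.states as kadi_states

def get_dither_kadi_state(date):
    # Continuity (most recent) values of the dither-related state keys at `date`
    keys = ['dither', 'dither_ampl_pitch', 'dither_ampl_yaw',
            'dither_period_pitch', 'dither_period_yaw']
    continuity = kadi_states.get_continuity(date, keys)
    return {key: continuity[key] for key in keys}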
