diff --git a/notebooks_tsqr/NightLog.ipynb b/notebooks_tsqr/NightLog.ipynb
index e5657f3..332b0b5 100644
--- a/notebooks_tsqr/NightLog.ipynb
+++ b/notebooks_tsqr/NightLog.ipynb
@@ -5,7 +5,7 @@
"id": "0",
"metadata": {},
"source": [
- "# Night Log"
+ "# Initialization"
]
},
{
@@ -15,11 +15,20 @@
"metadata": {},
"outputs": [],
"source": [
- "# Parameters. Set defaults here.\n",
+ "# Parameters. \n",
"# Times Square replaces this cell with the user's parameters.\n",
- "record_limit = '999'\n",
- "day_obs = 'TODAY' # TODAY, YESTERDAY, YYYY-MM-DD\n",
- "number_of_days = '1' # Total number of days of data to display (ending on day_obs)"
+ "\n",
+ "# The run-time defaults for all of these parameters are in NightLog.yaml\n",
+ "# Under Times Square, the run-time defaults always override values given here.\n",
+ "# Values here are used for local tests.\n",
+ "\n",
+ "# day_obs values: TODAY, YESTERDAY, YYYY-MM-DD\n",
+ "# Report on observing nights that start upto but not included this day.\n",
+ "day_obs = '2024-09-04' # Value to use for local testing (Summit)\n",
+ "#!day_obs = 'TODAY' # TODO Change to 'TODAY' to test with default before push \n",
+ "\n",
+ "# Total number of days of data to display (ending on day_obs)\n",
+ "number_of_days = '1' # TODO Change to '1' to test with default before push "
]
},
{
@@ -33,12 +42,28 @@
"import requests\n",
"from collections import defaultdict\n",
"import pandas as pd\n",
- "from pprint import pp\n",
+ "from pprint import pp, pformat\n",
"from urllib.parse import urlencode\n",
- "from IPython.display import display, Markdown, display_markdown\n",
"from matplotlib import pyplot as plt\n",
"import os\n",
- "from datetime import datetime, date, timedelta"
+ "from datetime import datetime, date, timedelta\n",
+ "\n",
+ "# When running under Times Square, install pkg from github.\n",
+ "# Otherwise use what is installed locally (intended to be dev editiable pkg)\n",
+ "if os.environ.get('EXTERNAL_INSTANCE_URL'):\n",
+ " print('Installing \"lsst.ts.logging_and_reporting\" from github using \"prototype\" branch....')\n",
+ " !pip install --upgrade git+https://github.com/lsst-ts/ts_logging_and_reporting.git@prototype >/dev/null\n",
+ "import lsst.ts.logging_and_reporting.source_adapters as sad\n",
+ "import lsst.ts.logging_and_reporting.almanac as alm\n",
+ "import lsst.ts.logging_and_reporting.reports as rep \n",
+ "import lsst.ts.logging_and_reporting.utils as ut\n",
+ "from lsst.ts.logging_and_reporting.reports import md,mdlist\n",
+ "\n",
+ "try:\n",
+ " import lsst.ts.logging_and_reporting.version\n",
+ " lrversion = lsst.ts.logging_and_reporting.version.__version__\n",
+ "except:\n",
+ " lrversion = 'LIVE'"
]
},
{
@@ -48,33 +73,21 @@
"metadata": {},
"outputs": [],
"source": [
- "limit = int(record_limit)\n",
- "\n",
- "match day_obs.lower():\n",
- " case 'today':\n",
- " date = datetime.now().date()\n",
- " case 'yesterday':\n",
- " date = datetime.now().date()-timedelta(days=1)\n",
- " case _:\n",
- " date = datetime.strptime(dd, '%Y-%m-%d').date()\n",
+ "# Normalize Parameters (both explicit Times Squares params, in implicit ones)\n",
+ "limit = 500 # YAGNI for Auto get more if this isn't enough to get all requested DAYS\n",
"\n",
+ "date = ut.get_datetime_from_day_obs_str(day_obs)\n",
+ "# date: is EXLUSIVE (upto, but not including)\n",
"days = int(number_of_days)\n",
"\n",
"# Thus: [min_day_obs,max_day_obs)\n",
- "min_day_obs = (date - timedelta(days=days-1)).strftime('%Y%m%d') # Inclusive\n",
- "max_day_obs = (date + timedelta(days=1)).strftime('%Y%m%d') # prep for Exclusive\n",
+ "# Format: string, YYYY-MM-DD\n",
+ "min_day_obs = (date - timedelta(days=days-1)).strftime('%Y-%m-%d') # Inclusive\n",
+ "max_day_obs = (date + timedelta(days=1)).strftime('%Y-%m-%d') # prep for Exclusive\n",
"\n",
"response_timeout = 3.05 # seconds, how long to wait for connection\n",
"read_timeout = 20 # seconds\n",
- "timeout = (float(response_timeout), float(read_timeout))\n",
- "\n",
- "summit = 'https://summit-lsp.lsst.codes'\n",
- "usdf = 'https://usdf-rsp-dev.slac.stanford.edu'\n",
- "tucson = 'https://tucson-teststand.lsst.codes'\n",
- "\n",
- "# Use server=tucson for dev testing.\n",
- "# Use server=usdf for push to develop. TODO\n",
- "server = os.environ.get('EXTERNAL_INSTANCE_URL', usdf)"
+ "timeout = (float(response_timeout), float(read_timeout))"
]
},
{
@@ -84,607 +97,199 @@
"metadata": {},
"outputs": [],
"source": [
- "print(f'Report from {server} over {number_of_days} nights'\n",
- " f' from {min_day_obs} to {date}. ')"
+ "# Set default env to \"usdf\" and try before PUSH to repo.\n",
+ "summit = 'https://summit-lsp.lsst.codes'\n",
+ "usdf = 'https://usdf-rsp-dev.slac.stanford.edu'\n",
+ "tucson = 'https://tucson-teststand.lsst.codes'\n",
+ "\n",
+ "# The default provided here is for local testing.\n",
+ "# Under Times Square it is ignored.\n",
+ "server = os.environ.get('EXTERNAL_INSTANCE_URL', usdf) # TODO try with \"usdf\" before push (else \"summit\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5",
+ "metadata": {},
+ "source": [
+ "# Overview"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6",
+ "metadata": {},
+ "source": [
+ "The only environment that has everything needed for this page is\n",
+ "https://summit-lsp.lsst.codes\n",
+ "\n",
+ "However, Times Square **does not** run on the Summit. It **does** run on USDF-dev. USDF doesn't fully support all the data sources we need so some functionality is currently missing on this page."
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "5",
+ "id": "7",
"metadata": {},
"outputs": [],
"source": [
- "# For Times Square, comment out next line and past next cell with contents of local python file.\n",
- "#! from lsst.ts.logging_and_reporting.source_adapters import ExposurelogAdapter, NarrativelogAdapter, keep_fields\n",
- "# Once our logrep package has been installed in RSP, we can use the simpler \"import\""
+ "# Display overview of Report context \n",
+ "md(f'''\n",
+ "Report for **{date}** covering the previous **{days}** observing night(s).\n",
+ "- Run on logs from **{server}/**\n",
+ "- Using *Prototype* Logging and Reporting Version: **{lrversion}**\n",
+ "''')\n",
+ "\n",
+ "endpoint_urls_str = '\\n- '.join(['',*sad.all_endpoints(server)])\n",
+ "md(f'This report will attempt to use the following log sources: {endpoint_urls_str}')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8",
+ "metadata": {},
+ "source": [
+ "# DDV"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "6",
+ "id": "9",
"metadata": {},
"outputs": [],
"source": [
- "# TODO\n",
- "# Comment out the import in the cell above this one.\n",
- "# Paste contents of source_adapters.py in new cell below this one."
+ "DDV = f\"{server}/rubintv-dev/ddv/index.html\" if 'summit' in server else f\"{server}/rubintv/ddv/index.html\"\n",
+ "md(f'Access DDV part of RubinTV: {DDV}')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "10",
+ "metadata": {},
+ "source": [
+ "# Almanac"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "7",
+ "id": "11",
"metadata": {},
"outputs": [],
"source": [
- "# This file is part of ts_logging_and_reporting.\n",
- "#\n",
- "# Developed for Vera C. Rubin Observatory Telescope and Site Systems.\n",
- "# This product includes software developed by the LSST Project\n",
- "# (https://www.lsst.org).\n",
- "# See the COPYRIGHT file at the top-level directory of this distribution\n",
- "# for details of code ownership.\n",
- "#\n",
- "# This program is free software: you can redistribute it and/or modify\n",
- "# it under the terms of the GNU General Public License as published by\n",
- "# the Free Software Foundation, either version 3 of the License, or\n",
- "# (at your option) any later version.\n",
- "#\n",
- "# This program is distributed in the hope that it will be useful,\n",
- "# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
- "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
- "# GNU General Public License for more details.\n",
- "#\n",
- "# You should have received a copy of the GNU General Public License\n",
- "# along with this program. If not, see .\n",
- "\n",
- "\n",
- "############################################\n",
- "# Python Standard Library\n",
- "from urllib.parse import urlencode\n",
- "import itertools\n",
- "from datetime import datetime\n",
- "from warnings import warn\n",
- "from collections import defaultdict\n",
- "from abc import ABC\n",
- "\n",
- "############################################\n",
- "# External Packages\n",
- "import requests\n",
- "\n",
- "MAX_CONNECT_TIMEOUT = 3.1 # seconds\n",
- "MAX_READ_TIMEOUT = 90 * 60 # seconds\n",
- "\n",
- "def keep_fields(outfields, recs):\n",
- " \"\"\"Keep only keys in OUTFIELDS list of RECS (list of dicts)\n",
- " SIDE EFFECT: Removes extraneous keys from all dicts in RECS.\n",
- " \"\"\"\n",
- " if outfields:\n",
- " for rec in recs:\n",
- " nukefields = set(rec.keys()) - set(outfields)\n",
- " print(f'{rec=} {nukefields=}')\n",
- " for f in nukefields:\n",
- " del rec[f]\n",
- "\n",
- "class SourceAdapter(ABC):\n",
- " \"\"\"Abstract Base Class for all source adapters.\n",
- " \"\"\"\n",
- " # TODO document class including all class variables.\n",
- " def __init__(self, *,\n",
- " server_url='https://tucson-teststand.lsst.codes',\n",
- " connect_timeout=1.05, # seconds\n",
- " read_timeout=2, # seconds\n",
- " ):\n",
- " self.server = server_url\n",
- " self.c_timeout = min(MAX_CONNECT_TIMEOUT,\n",
- " float(connect_timeout)) # seconds\n",
- " self.r_timeout = min(MAX_READ_TIMEOUT, # seconds\n",
- " float(read_timeout))\n",
- " self.timeout = (self.c_timeout, self.r_timeout)\n",
- "\n",
- " # Provide the following in subclass\n",
- " output_fields = None\n",
- " service = None\n",
- " endpoints = None\n",
- "\n",
- " @property\n",
- " def source_url(self):\n",
- " return f'{self.server}/{self.service}'\n",
- "\n",
- "\n",
- " def check_endpoints(self, timeout=None):\n",
- " to = (timeout or self.timeout)\n",
- " print(f'Try connect to each endpoint of {self.server}/{self.service} '\n",
- " f'using timeout={to}.')\n",
- " url_http_status_code = dict()\n",
- " for ep in self.endpoints:\n",
- " url = f'{self.server}/{self.service}/{ep}'\n",
- " try:\n",
- " r = requests.get(url, timeout=(timeout or self.timeout))\n",
- " except:\n",
- " url_http_status_code[url] = 'timeout'\n",
- " else:\n",
- " url_http_status_code[url] = r.status_code\n",
- " return url_http_status_code\n",
- "\n",
- "\n",
- " def analytics(self, recs, categorical_fields=None):\n",
- " if len(recs) == 0:\n",
- " return dict(fields=[],\n",
- " facet_fields=set(),\n",
- " facets=dict())\n",
- "\n",
- " non_cats = set([\n",
- " 'tags', 'urls', 'message_text', 'id', 'date_added',\n",
- " 'obs_id', 'day_obs', 'seq_num', 'parent_id', 'user_id',\n",
- " 'date_invalidated', 'date_begin', 'date_end',\n",
- " 'time_lost', # float\n",
- " # 'systems','subsystems','cscs', # values need special handling\n",
- " ])\n",
- " flds = set(recs[0].keys())\n",
- " if not categorical_fields:\n",
- " categorical_fields = flds\n",
- " ignore_fields = flds - categorical_fields\n",
- " facflds = flds - ignore_fields\n",
- "\n",
- " # facets(field) = set(value-1, value-2, ...)\n",
- " facets = {fld: set([str(r[fld])\n",
- " for r in recs if not isinstance(r[fld], list)])\n",
- " for fld in facflds}\n",
- " return dict(fields=flds,\n",
- " facet_fields=facflds,\n",
- " facets=facets,\n",
- " )\n",
- "\n",
- "\n",
- "# Not available on SLAC (usdf) as of 9/9/2024.\n",
- "class NightReportAdapter(SourceAdapter):\n",
- " service = \"nightreport\"\n",
- " endpoints = ['reports']\n",
- " primary_endpoint = 'reports'\n",
- "\n",
- "class NarrativelogAdapter(SourceAdapter):\n",
- " \"\"\"TODO full documentation\n",
- " \"\"\"\n",
- " service = 'narrativelog'\n",
- " endpoints = ['messages',]\n",
- " primary_endpoint = 'messages'\n",
- " fields = {'category',\n",
- " 'components',\n",
- " 'cscs',\n",
- " 'date_added',\n",
- " 'date_begin',\n",
- " 'date_end',\n",
- " 'date_invalidated',\n",
- " 'id',\n",
- " 'is_human',\n",
- " 'is_valid',\n",
- " 'level',\n",
- " 'message_text',\n",
- " 'parent_id',\n",
- " 'primary_hardware_components',\n",
- " 'primary_software_components',\n",
- " 'site_id',\n",
- " 'subsystems',\n",
- " 'systems',\n",
- " 'tags',\n",
- " 'time_lost',\n",
- " 'time_lost_type',\n",
- " 'urls',\n",
- " 'user_agent',\n",
- " 'user_id'}\n",
- " filters = {\n",
- " 'site_ids',\n",
- " 'message_text', # Message text contain ...\n",
- " 'min_level', # inclusive\n",
- " 'max_level', # exclusive\n",
- " 'user_ids',\n",
- " 'user_agents',\n",
- " 'categories',\n",
- " 'exclude_categories',\n",
- " 'time_lost_types',\n",
- " 'exclude_time_lost_types',\n",
- " 'tags', # at least one must be present.\n",
- " 'exclude_tags', # all must be absent\n",
- " 'systems',\n",
- " 'exclude_systems',\n",
- " 'subsystems',\n",
- " 'exclude_subsystems',\n",
- " 'cscs',\n",
- " 'exclude_cscs',\n",
- " 'components',\n",
- " 'exclude_components',\n",
- " 'primary_software_components',\n",
- " 'exclude_primary_software_components',\n",
- " 'primary_hardware_components',\n",
- " 'exclude_primary_hardware_components',\n",
- " 'urls',\n",
- " 'min_time_lost',\n",
- " 'max_time_lost',\n",
- " 'has_date_begin',\n",
- " 'min_date_begin',\n",
- " 'max_date_begin',\n",
- " 'has_date_end',\n",
- " 'min_date_end',\n",
- " 'max_date_end',\n",
- " 'is_human', # Allowed: either, true, false; Default=either\n",
- " 'is_valid', # Allowed: either, true, false; Default=true\n",
- " 'min_date_added', # inclusive, TAI ISO string, no TZ\n",
- " 'max_date_added', # exclusive, TAI ISO string, no TZ\n",
- " 'has_date_invalidated',\n",
- " 'min_date_invalidated',\n",
- " 'max_date_invalidated',\n",
- " 'has_parent_id',\n",
- " 'order_by',\n",
- " 'offset',\n",
- " 'limit'\n",
- " }\n",
- "\n",
- " def get_messages(self,\n",
- " site_ids=None,\n",
- " message_text=None,\n",
- " min_date_end=None,\n",
- " max_date_end=None,\n",
- " is_human='either',\n",
- " is_valid='either',\n",
- " offset=None,\n",
- " limit=None,\n",
- " outfields=None,\n",
- " ):\n",
- " qparams = dict(is_human=is_human, is_valid=is_valid)\n",
- " if site_ids:\n",
- " qparams['site_ids'] = site_ids\n",
- " if message_text:\n",
- " qparams['message_text'] = message_text\n",
- " if min_date_end:\n",
- " qparams['min_date_end'] = min_date_end\n",
- " if max_date_end:\n",
- " qparams['max_date_end'] = max_date_end\n",
- " if limit:\n",
- " qparams['limit'] = limit\n",
- "\n",
- " qstr = urlencode(qparams)\n",
- " url = f'{self.server}/{self.service}/messages?{qstr}'\n",
- " try:\n",
- " recs = requests.get(url, timeout=self.timeout).json()\n",
- " recs.sort(key=lambda r: r['date_begin'])\n",
- " except Exception as err:\n",
- " warn(f'No {self.service} records retrieved: {err}')\n",
- " recs = []\n",
- "\n",
- " keep_fields(outfields, recs)\n",
- " self.recs = recs\n",
- " return self.recs\n",
- "\n",
- " def get_timelost(self, rollup='day'):\n",
- " day_tl = dict() # day_tl[day] = totalDayTimeLost\n",
- " for day,dayrecs in itertools.groupby(\n",
- " self.recs,\n",
- " key=lambda r: datetime.fromisoformat(r['date_begin']).date().isoformat()\n",
- " ):\n",
- " day_tl[day] = sum([r['time_lost'] for r in dayrecs])\n",
- " return day_tl\n",
- "\n",
- "class ExposurelogAdapter(SourceAdapter):\n",
- " \"\"\"TODO full documentation\n",
- "\n",
- " EXAMPLES:\n",
- " gaps,recs = logrep_utils.ExposurelogAdapter(server_url='https://usdf-rsp-dev.slac.stanford.edu').get_observation_gaps('LSSTComCam')\n",
- " gaps,recs = logrep_utils.ExposurelogAdapter(server_url='[[https://tucson-teststand.lsst.codes').get_observation_gaps('LSSTComCam')\n",
- " \"\"\"\n",
- " ignore_fields = ['id']\n",
- " service = 'exposurelog'\n",
- " endpoints = [\n",
- " 'instruments',\n",
- " 'exposures',\n",
- " 'messages',\n",
- " ]\n",
- " primary_endpoint = 'messages'\n",
- " fields = {'date_added',\n",
- " 'date_invalidated',\n",
- " 'day_obs',\n",
- " 'exposure_flag',\n",
- " 'id',\n",
- " 'instrument',\n",
- " 'is_human',\n",
- " 'is_valid',\n",
- " 'level',\n",
- " 'message_text',\n",
- " 'obs_id',\n",
- " 'parent_id',\n",
- " 'seq_num',\n",
- " 'site_id',\n",
- " 'tags',\n",
- " 'urls',\n",
- " 'user_agent',\n",
- " 'user_id'}\n",
- " filters = {\n",
- " 'site_ids',\n",
- " 'obs_id',\n",
- " 'instruments',\n",
- " 'min_day_obs', # inclusive, integer in form YYYMMDD\n",
- " 'max_day_obs', # exclusive, integer in form YYYMMDD\n",
- " 'min_seq_num',\n",
- " 'max_seq_num',\n",
- " 'message_text', # Message text contain ...\n",
- " 'min_level', # inclusive\n",
- " 'max_level', # exclusive\n",
- " 'tags', # at least one must be present.\n",
- " 'urls',\n",
- " 'exclude_tags', # all must be absent\n",
- " 'user_ids',\n",
- " 'user_agents',\n",
- " 'is_human', # Allowed: either, true, false; Default=either\n",
- " 'is_valid', # Allowed: either, true, false; Default=true\n",
- " 'exposure_flags',\n",
- " 'min_date_added', # inclusive, TAI ISO string, no TZ\n",
- " 'max_date_added', # exclusive, TAI ISO string, no TZ\n",
- " 'has_date_invalidated',\n",
- " 'min_date_invalidated',\n",
- " 'max_date_invalidated',\n",
- " 'has_parent_id',\n",
- " 'order_by',\n",
- " 'offset',\n",
- " 'limit'\n",
- " }\n",
- "\n",
- "\n",
- " def check_endpoints(self, timeout=None):\n",
- " to = (timeout or self.timeout)\n",
- " print(f'Try connect to each endpoint of {self.server}/{self.service} '\n",
- " f'using timeout={to}.')\n",
- " url_http_status_code = dict()\n",
- "\n",
- " for ep in self.endpoints:\n",
- " qstr = '?instrument=na' if ep == 'exposures' else ''\n",
- " url = f'{self.server}/{self.service}/{ep}{qstr}'\n",
- " try:\n",
- " r = requests.get(url, timeout=to)\n",
- " except:\n",
- " url_http_status_code[url] = 'timeout'\n",
- " else:\n",
- " url_http_status_code[url] = r.status_code\n",
- " return url_http_status_code\n",
- "\n",
- "\n",
- " def get_instruments(self):\n",
- " url = f'{self.server}/{self.service}/instruments'\n",
- " try:\n",
- " instruments = requests.get(url, timeout=self.timeout).json()\n",
- " except Exception as err:\n",
- " warn(f'No instruments retrieved: {err}')\n",
- " instruments = dict(dummy=[])\n",
- " # Flatten the lists\n",
- " return list(itertools.chain.from_iterable(instruments.values()))\n",
- "\n",
- " def get_exposures(self, instrument, registry=1):\n",
- " qparams = dict(instrument=instrument, registery=registry)\n",
- " url = f'{self.server}/{self.service}/exposures?{urlencode(qparams)}'\n",
- " try:\n",
- " recs = requests.get(url, timeout=self.timeout).json()\n",
- " except Exception as err:\n",
- " warn(f'No exposures retrieved: {err}')\n",
- " recs = []\n",
- " return recs\n",
- "\n",
- " def get_messages(self,\n",
- " site_ids=None,\n",
- " obs_ids=None,\n",
- " instruments=None,\n",
- " message_text=None,\n",
- " min_day_obs=None,\n",
- " max_day_obs=None,\n",
- " is_human='either',\n",
- " is_valid='either',\n",
- " exposure_flags=None,\n",
- " offset=None,\n",
- " limit=None,\n",
- " outfields=None,\n",
- " ):\n",
- " qparams = dict(is_human=is_human, is_valid=is_valid)\n",
- " if site_ids:\n",
- " qparams['site_ids'] = site_ids\n",
- " if obs_ids:\n",
- " qparams['obs_ids'] = obs_ids\n",
- " if instruments:\n",
- " qparams['instruments'] = instruments\n",
- " if min_day_obs:\n",
- " qparams['min_day_obs'] = min_day_obs\n",
- " if max_day_obs:\n",
- " qparams['max_day_obs'] = max_day_obs\n",
- " if exposure_flags:\n",
- " qparams['exposure_flags'] = exposure_flags\n",
- " if offset:\n",
- " qparams['offset'] = offset\n",
- " if limit:\n",
- " qparams['limit'] = limit\n",
- "\n",
- " qstr = urlencode(qparams)\n",
- " url = f'{self.server}/{self.service}/messages?{qstr}'\n",
- " recs = []\n",
- " try:\n",
- " response = requests.get(url, timeout=self.timeout)\n",
- " recs = response.json()\n",
- " except Exception as err:\n",
- " warnings.warn(f'No {self.service} records retrieved: {err}')\n",
- "\n",
- " if len(recs) == 0:\n",
- " warn(f'No records retrieved from {url}')\n",
- "\n",
- " if recs:\n",
- " recs.sort(key=lambda r: r['day_obs'])\n",
- "\n",
- " keep_fields(outfields, recs)\n",
- " self.recs = recs\n",
- " return self.recs\n",
- "\n",
- " def get_observation_gaps(self, instruments=None,\n",
- " min_day_obs=None, # YYYYMMDD\n",
- " max_day_obs=None, # YYYYMMDD\n",
- " ):\n",
- " if not instruments:\n",
- " instruments = self.get_instruments()\n",
- " assert isinstance(instruments,list), \\\n",
- " f'\"instruments\" must be a list. Got {instruments!r}'\n",
- " # inst_day_rollupol[instrument] => dict[day] => exposureGapInMinutes\n",
- " inst_day_rollup = defaultdict(dict) # Instrument/Day rollup\n",
- "\n",
- " for instrum in instruments:\n",
- " recs = self.get_exposures(instrum)\n",
- " instrum_gaps = dict()\n",
- " for day,dayrecs in itertools.groupby(recs,\n",
- " key=lambda r: r['day_obs']):\n",
- " gaps = list()\n",
- " begin = end = None\n",
- " for rec in dayrecs:\n",
- " begin = rec['timespan_begin']\n",
- " if end:\n",
- " # span in minutes\n",
- " diff = (datetime.fromisoformat(begin)\n",
- " - datetime.fromisoformat(end)\n",
- " ).total_seconds() / 60.0\n",
- "\n",
- " gaps.append((\n",
- " datetime.fromisoformat(end).time().isoformat(),\n",
- " datetime.fromisoformat(begin).time().isoformat(),\n",
- " diff\n",
- " ))\n",
- " end = rec['timespan_end']\n",
- " instrum_gaps[day] = gaps\n",
- "\n",
- " #!roll = dict()\n",
- " # Rollup gap times by day\n",
- " for day,tuples in instrum_gaps.items():\n",
- " #!roll[day] = sum([t[2] for t in tuples])\n",
- " inst_day_rollup[instrum][day] = sum([t[2] for t in tuples])\n",
- "\n",
- " return inst_day_rollup\n",
- "\n",
- "\n",
- "\n",
- "\n",
- "class Dashboard: # TODO Complete and move to its own file.\n",
- " \"\"\"Verify that we can get to all the API endpoints and databases we need for\n",
- " any of our sources.\n",
- " \"\"\"\n",
- "\n",
- " envs = dict(\n",
- " summit = 'https://summit-lsp.lsst.codes',\n",
- " usdf_dev = 'https://usdf-rsp-dev.slac.stanford.edu',\n",
- " tucson = 'https://tucson-teststand.lsst.codes',\n",
- " # Environments not currently used:\n",
- " # rubin_usdf_dev = '',\n",
- " # data_lsst_cloud = '',\n",
- " # usdf = '',\n",
- " # base_data_facility = '',\n",
- " # rubin_idf_int = '',\n",
- " )\n",
- " adapters = [ExposurelogAdapter,\n",
- " NarrativelogAdapter,\n",
- " # NightReportAdapter, # TODO\n",
- " ]\n",
- "\n",
- " def report(self, timeout=None):\n",
- " url_status = dict()\n",
- " for env,server in self.envs.items():\n",
- " for adapter in self.adapters:\n",
- " service = adapter(server_url=server)\n",
- " # url_status[endpoint_url] = http_status_code\n",
- " url_status.update(service.check_endpoints(timeout=timeout))\n",
- " total_cnt = 0\n",
- " good_cnt = 0\n",
- " good = list()\n",
- " print('\\nStatus for each endpoint URL:')\n",
- " for url,stat in url_status.items():\n",
- " print(f'{stat}\\t{url}')\n",
- " total_cnt += 1\n",
- " if stat == 200:\n",
- " good_cnt += 1\n",
- " good.append(url)\n",
- " print(f'\\nConnected to {good_cnt} out of {total_cnt} endpoints.')\n",
- " return good_cnt, good\n"
+ "# Display various almanac values (for moon, sun)\n",
+ "rep.AlmanacReport().almanac_as_dataframe()"
]
},
{
"cell_type": "markdown",
- "id": "8",
+ "id": "12",
"metadata": {},
"source": [
- "# Exposure Log"
+ "# Night Report"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "9",
+ "id": "13",
"metadata": {},
"outputs": [],
"source": [
- "exposure_adapter = ExposurelogAdapter(server_url=server)\n",
- "exposure_url = exposure_adapter.source_url\n",
- "try:\n",
- " exposure_recs = exposure_adapter.get_messages(limit=limit,\n",
- " min_day_obs=min_day_obs,\n",
- " max_day_obs=max_day_obs,\n",
- " )\n",
- "except Exception as err:\n",
- " exposure_recs = []\n",
- " msg = f'ERROR getting records from {exposure_url=}: {err=}'\n",
- " raise Exception(msg)"
+ "# Get data from Night Report log. Display nightly Jira BLOCKS.\n",
+ "nr_adapter = sad.NightReportAdapter(server_url=server,\n",
+ " limit=limit,\n",
+ " min_day_obs=min_day_obs,\n",
+ " max_day_obs=max_day_obs,)\n",
+ "\n",
+ "status = nr_adapter.get_reports()\n",
+ "nr_url = status['endpoint_url']\n",
+ "#display(status)\n",
+ "rep.adapter_overview(nr_adapter, status, limit)"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "10",
+ "id": "14",
"metadata": {},
"outputs": [],
"source": [
- "print(f'Retrieved {len(exposure_recs)} records from {exposure_url}')"
+ "# Display Jira BLOCKS\n",
+ "front = 'https://rubinobs.atlassian.net/projects/BLOCK?selectedItem=com.atlassian.plugins.atlassian-connect-plugin:com.kanoah.test-manager__main-project-page#!/'\n",
+ "tickets = nr_adapter.nightly_tickets(nr_adapter.records)\n",
+ "\n",
+ "if tickets:\n",
+ " mdstr = '## Nightly Jira BLOCKs'\n",
+ " for day, url_list in tickets.items():\n",
+ " mdstr += f'\\n- {day}'\n",
+ " for ticket_url in url_list:\n",
+ " mdstr += f'\\n - [{ticket_url.replace(front,\"\")}]({ticket_url})'\n",
+ " md(mdstr)\n",
+ "else:\n",
+ " md(f'No jira BLOCK tickets found.', color='lightblue')\n",
+ " md(f'Used: [API Data]({nr_url})')\n",
+ "\n",
+ "# Display time log\n",
+ "nr_rep = rep.NightlyLogReport(min_day_obs=min_day_obs, max_day_obs=max_day_obs)\n",
+ "nr_rep.time_log_as_markdown(nr_adapter, nr_url)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "15",
+ "metadata": {},
+ "source": [
+ "# Exposure Log"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "11",
+ "id": "16",
"metadata": {},
"outputs": [],
"source": [
- "if exposure_recs:\n",
- " new_column_names = dict(message_text='message',\n",
- " date_added='date'\n",
- " )\n",
- " df = pd.DataFrame(exposure_recs).rename(columns=new_column_names)\n",
- " user_df = df[['date','message']]\n",
- " \n",
- " display_markdown(f'### Exposure log for {number_of_days} days {min_day_obs} to {max_day_obs}', raw=True)\n",
- " display(user_df)"
+ "# Get data from Exposure log. Display time log.\n",
+ "exposure_adapter = sad.ExposurelogAdapter(\n",
+ " server_url=server,\n",
+ " limit=limit,\n",
+ " min_day_obs=min_day_obs,\n",
+ " max_day_obs=max_day_obs,\n",
+ ")\n",
+ "status = exposure_adapter.get_messages()\n",
+ "exposure_url = status['endpoint_url']\n",
+ "rep.adapter_overview(exposure_adapter, status, limit)\n",
+ "\n",
+ "# Display time log\n",
+ "exposure_rep = rep.ExposurelogReport(min_day_obs=min_day_obs, max_day_obs=max_day_obs)\n",
+ "exposure_rep.time_log_as_markdown(exposure_adapter, exposure_url)"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "12",
+ "id": "17",
"metadata": {},
"outputs": [],
"source": [
- "display(Markdown(f\"### {exposure_url}/exposures/ Not yet functional on USDF\"))\n",
+ "# Display Observation gaps\n",
"gaps = exposure_adapter.get_observation_gaps()\n",
"if gaps:\n",
+ " md(f'### Date vs Observation Gap (minutes) for all Instruments')\n",
" for instrument, day_gaps in gaps.items():\n",
- " display(Markdown(f'### Date vs Observation Gap (minutes) for {instrument=!s}'))\n",
- " x,y = zip(*day_gaps.items())\n",
- " df = pd.DataFrame(dict(day=x,minutes=y))\n",
- " df.plot.bar(x='day', y='minutes', title=f'{instrument=!s}')\n",
- "else:\n",
- " print(f'No Observation Gaps found in exposures.')"
+ " if len(day_gaps) == 0:\n",
+ " md(f'**No day gaps found for *{instrument=!s}* **', color='lightblue')\n",
+ " else:\n",
+ " x,y = zip(*day_gaps.items())\n",
+ " df = pd.DataFrame(dict(day=x,minutes=y))\n",
+ " df.plot.bar(x='day', y='minutes', title=f'{instrument=!s}')"
]
},
{
"cell_type": "markdown",
- "id": "13",
+ "id": "18",
"metadata": {},
"source": [
"# Narrative Log\n"
@@ -693,66 +298,64 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "14",
+ "id": "19",
"metadata": {},
"outputs": [],
"source": [
- "narrative_adapter = NarrativelogAdapter(server_url=server)\n",
- "narrative_url = narrative_adapter.source_url\n",
- "try:\n",
- " # date like '2000-01-02 12:00:00'\n",
- " # str(datetime(2000, 1, 2, 12, 0, 0))\n",
- " min_date = str(datetime.strptime(min_day_obs,'%Y%m%d'))\n",
- " max_date = str(datetime.strptime(max_day_obs,'%Y%m%d'))\n",
- " print(f'Get data from {narrative_url}: {min_date} to {max_date}')\n",
- " narrative_recs = narrative_adapter.get_messages(\n",
- " limit=limit,\n",
- " min_date_end=min_date,\n",
- " max_date_end=max_date\n",
- " )\n",
- "except Exception as err:\n",
- " narrative_recs = []\n",
- " msg = f'ERROR getting records from {narrative_url}: {err=}'\n",
- " raise Exception(msg)\n",
- "\n",
- "print(f'Retrieved {len(narrative_recs)} records.')"
+ "# Get data from Narrative log. Display time log.\n",
+ "narrative_adapter = sad.NarrativelogAdapter(\n",
+ " server_url=server,\n",
+ " limit=limit,\n",
+ " min_day_obs=min_day_obs,\n",
+ " max_day_obs=max_day_obs,\n",
+ ")\n",
+ "status = narrative_adapter.get_messages()\n",
+ "narrative_url = status['endpoint_url']\n",
+ "rep.adapter_overview(narrative_adapter, status, limit)\n",
+ "\n",
+ "narrrative_rep = rep.NarrativelogReport(min_day_obs=min_day_obs, max_day_obs=max_day_obs)\n",
+ "narrrative_rep.time_log_as_markdown(narrative_adapter, narrative_url)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "20",
+ "metadata": {},
+ "source": [
+ "# Developer Only Section"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "15",
+ "id": "21",
"metadata": {},
"outputs": [],
"source": [
- "new_column_names = dict(message_text='message',\n",
- " date_added='date'\n",
- " )\n",
- "\n",
- "if narrative_recs:\n",
- " keep_fields(['message_text','date_added'], narrative_recs)\n",
- " df = pd.DataFrame(narrative_recs).rename(columns=new_column_names)\n",
- " user_df = df # [['date','message']]\n",
- "\n",
- " display(Markdown(f'## Narrative log (Style A) for {number_of_days} days {min_day_obs} to {max_day_obs}'))\n",
- " display(Markdown(\"### Choose display Style (or offer other suggestion)\"))\n",
- " with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n",
- " display(user_df)\n",
- " \n",
- " display(Markdown(f'## Narrative log (Style B)'))\n",
- " for index,row in user_df.iterrows():\n",
- " print(f\"{datetime.fromisoformat(user_df.iloc[0]['date']).date()}: \"\n",
- " f\"{row['message']}\"\n",
- " )"
+ "# Conditionally display our current ability to connect to all needed endpoints.\n",
+ "if not os.environ.get('EXTERNAL_INSTANCE_URL'):\n",
+ " md('## Dashboard')\n",
+ " md('(This is not done when running under Times Square.)')\n",
+ " %run ./dashboard.ipynb"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "22",
+ "metadata": {},
+ "source": [
+ "# Finale"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "16",
+ "id": "23",
"metadata": {},
"outputs": [],
- "source": []
+ "source": [
+ "print(f'Finished {str(datetime.now())}')"
+ ]
}
],
"metadata": {
@@ -771,7 +374,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.12"
+ "version": "3.11.9"
}
},
"nbformat": 4,
diff --git a/notebooks_tsqr/NightLog.yaml b/notebooks_tsqr/NightLog.yaml
index 79cecb0..ce5fb78 100644
--- a/notebooks_tsqr/NightLog.yaml
+++ b/notebooks_tsqr/NightLog.yaml
@@ -11,19 +11,19 @@ tags:
- prototype
- exposure
parameters:
- record_limit:
- type: integer
- description: Max number of records to output
- default: 99
- minimum: 1
- maximum: 9999
day_obs:
type: string
- description: The night to report on. (YYYY-MM-DD, TODAY, YESTERDAY)
+ description: >
+ The night to report on. (Allowed: YYYY-MM-DD, TODAY, YESTERDAY)
+      The report will include days up to, but not including, day_obs.
+ For a value of TODAY, the last observing night that will be
+ included in the report will be the one that started yesterday.
+ This is usually what you want.
default: "TODAY"
number_of_days:
type: integer
- description: Number of days to show (ending in day_obs)
+ description: >
+ Number of days to show in the report.
default: 1
minimum: 1
maximum: 9
diff --git a/notebooks_tsqr/README.md b/notebooks_tsqr/README.md
index 3efda5a..98fb692 100644
--- a/notebooks_tsqr/README.md
+++ b/notebooks_tsqr/README.md
@@ -1,5 +1,58 @@
-Times Square notebooks for (potential) use in project-wide Logging & Reporting
+# Logging & Reporting Times Square Notebooks
-See [offical
-documentation](https://rsp.lsst.io/v/usdfdev/guides/times-square/index.html)
-on creating notebooks for use by Times Square.
+Notebooks found here in `./notebooks_tsqr/` are meant to be run in Times Square for use in project-wide Nightly Logging & Reporting.
+
+See [official Times-Square documentation](https://rsp.lsst.io/v/usdfdev/guides/times-square/index.html) on creating notebooks for use by Times Square.
+
+## Development Guidelines
+
+Rapid prototyping is enabled with the `prototype` branch.
+Times Square for this repository displays the `prototype` branch.
+
+- Create a branch for your Jira ticket in the format `tickets/dm-####` off of `prototype`
+- Communicate often with teammates when you want to push changes to `prototype`
+- Rebase your branch off `prototype` before merging your branch into `prototype`
+
+Example of flow:
+
+1. `git checkout prototype; git pull`
+2. `git checkout -b tickets/dm-23456`
+3. `git commit -m "work happened"; git push`
+4. `git checkout prototype; git pull`
+5. `git checkout tickets/dm-23456`
+6. `git rebase prototype`
+7. `git checkout prototype; git merge tickets/dm-23456; git push`
+
+
+Once per sprint (2 weeks), the developers on this repository (Steve Pothier & Valerie Becker) gather to discuss updates made to `prototype`, outstanding pull requests, and tickets that have been completed.
+
+Once they are in agreement, they merge `prototype` into the `develop` branch and close the related Jira tickets. A squash commit should be used here, with a descriptive title and description in the PR.
+
+
+## NightLog.ipynb
+
+NightLog.ipynb is our main Logging and Reporting notebook. This notebook is meant to display completed* views of logging information.
+Each separate notebook should be used to mature a logging/reporting product, which is then expected to be integrated into this 'main' notebook.
+
+\*_Completed to an alpha/beta level -- quick improvements will continue to happen during Fall-Winter 2024_
+
+## Dashboard
+
+Dashboard.ipynb is intended for local development and debugging. Run this notebook from outside RSP to evaluate your connection to an array of data sources.
+_RSP is not intended to have access to all of the data sources queried here._
+
+## Kernel
+
+Times Square developers/maintainers have indicated that the LSST Kernel should be used in notebooks displayed there.
+[RSP Stack info](https://developer.lsst.io/stack/conda.html#rubin-science-platform-notebooks)
+
+## Backend Code
+
+We are working our way toward a project that does not depend on Times Square. Toward that effort, we are incrementally abstracting common code out of the notebooks. This code is kept in `./python/lsst/ts/logging_and_reporting/`
+
+- `almanac.py` ...
+- `reports.py` ...
+- `source_adapters.py` ....
+- `utils.py` is used for ...
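+
+As a minimal sketch (not a definitive reference) of how a notebook is expected to use this package -- the adapter name, keyword parameters, and report call below are taken from `NightLog.ipynb`, and the server URL is the USDF-dev value used elsewhere in these notebooks:
+
+```python
+import lsst.ts.logging_and_reporting.source_adapters as sad
+import lsst.ts.logging_and_reporting.reports as rep
+
+server = 'https://usdf-rsp-dev.slac.stanford.edu'
+adapter = sad.NarrativelogAdapter(
+    server_url=server,
+    limit=500,
+    min_day_obs='2024-09-03',  # inclusive, YYYY-MM-DD
+    max_day_obs='2024-09-04',  # exclusive, YYYY-MM-DD
+)
+status = adapter.get_messages()             # fetch records; returns status dict
+rep.adapter_overview(adapter, status, 500)  # render an overview as markdown
+```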
\ No newline at end of file
diff --git a/notebooks_tsqr/TEMPLATE_logrep.yaml b/notebooks_tsqr/TEMPLATE_logrep.HIDE_yaml
similarity index 100%
rename from notebooks_tsqr/TEMPLATE_logrep.yaml
rename to notebooks_tsqr/TEMPLATE_logrep.HIDE_yaml
diff --git a/notebooks_tsqr/consdb/access_consdb.ipynb b/notebooks_tsqr/consdb/access_consdb.ipynb
index 27e58a3..dcc59d1 100644
--- a/notebooks_tsqr/consdb/access_consdb.ipynb
+++ b/notebooks_tsqr/consdb/access_consdb.ipynb
@@ -30,14 +30,12 @@
"source": [
"import os\n",
"from lsst.summit.utils import ConsDbClient\n",
- "from lsst.summit.utils.utils import computeCcdExposureId\n",
- "import sqlalchemy\n",
"import requests\n",
"import pandas as pd\n",
"from IPython.display import display, Markdown, display_markdown\n",
"\n",
- "\n",
- "URL = \"https://usdf-rsp.slac.stanford.edu/consdb/\" # Need to add that part about the headers to client flow through\n",
+ "# URL = \"https://usdf-rsp.slac.stanford.edu/consdb/\"\n",
+ "# Need to add that part about the headers to client flow through\n",
"URL = \"http://consdb-pq.consdb:8080/consdb\" # Don't use this one\n",
"\n",
"os.environ[\"no_proxy\"] += \",.consdb\"\n",
@@ -45,8 +43,9 @@
"access_token = os.getenv(\"ACCESS_TOKEN\")\n",
"headers = {\"Authorization\": f\"Bearer {access_token}\"}\n",
"\n",
- "sesh = requests.Session()\n",
- "sesh.headers.update(headers)\n",
+ "# This is how the session object should access the ACCESS Token from the headers\n",
+ "#sesh = requests.Session()\n",
+ "#sesh.headers.update(headers)\n",
"\n",
"%matplotlib inline"
]
@@ -58,17 +57,13 @@
"metadata": {},
"outputs": [],
"source": [
- "display_markdown('## Attempting to access Consolidated Database', raw=True)\n",
- "\n",
- "try:\n",
- " from lsst.summit.utils import ConsDbClient\n",
- " have_consdb = True\n",
- "except ImportError:\n",
- " have_consdb = False\n",
- "\n",
- "if have_consdb:\n",
- " client = ConsDbClient(URL)\n",
- " display_markdown('## Consolidated Database is accessible',raw=True)\n"
+ "from lsst.summit.utils import ConsDbClient\n",
+ "client = ConsDbClient(URL)\n",
+ "print(client)\n",
+ "#import sqlalchemy\n",
+ "#connection = sqlalchemy.create_engine('postgresql://usdf@usdf-summitdb.slac.stanford.edu/exposurelog')\n",
+ "#print(connection)\n",
+ "display_markdown('### Consolidated Database is accessible',raw=True)"
]
},
{
@@ -87,7 +82,6 @@
"metadata": {},
"outputs": [],
"source": [
- "# Add consdb\n",
"day_obs_int = int(day_obs.replace('-', ''))\n",
"\n",
"visit_query1 = f'''\n",
@@ -114,31 +108,33 @@
" WHERE v.day_obs = {day_obs_int} and q.visit_id = v.visit_id\n",
"'''\n",
"\n",
- "if have_consdb:\n",
- " # Potentially print some schema information for debugging\n",
- " \n",
+ "# Potentially print some schema information for debugging\n",
+ "try:\n",
" print(client.schema()) # list the instruments\n",
" print(client.schema('latiss')) # list tables for an instrument\n",
" print(client.schema('latiss', 'cdb_latiss.exposure_flexdata')) # specifically flexdata table\n",
" \n",
- " try:\n",
- " visits_latiss = consdb.query(visit_query1).to_pandas()\n",
- " visits_lsstcc = consdb.query(visit_query2).to_pandas()\n",
- " visits_lsstccs = consdb.query(visit_query3).to_pandas()\n",
+ "except requests.HTTPError or requests.JSONDecodeError:\n",
+ " print(client.schema()) # list the instruments\n",
+ " print(client.schema('latiss')) # list tables for an instrument\n",
+ " print(client.schema('latiss', 'cdb_latiss.exposure_flexdata'))\n",
+ "\n",
+ "try:\n",
+ " visits_latiss = client.query(visit_query1).to_pandas()\n",
+ " visits_lsstcc = client.query(visit_query2).to_pandas()\n",
+ " visits_lsstccs = client.query(visit_query3).to_pandas()\n",
"\n",
- " except requests.HTTPError or requests.JSONDecodeError:\n",
- " # Try twice\n",
- " visits_latiss = consdb.query(visit_query1).to_pandas()\n",
- " visits_lsstcc = consdb.query(visit_query2).to_pandas()\n",
- " visits_lsstccs = consdb.query(visit_query3).to_pandas()\n",
+ "except requests.HTTPError or requests.JSONDecodeError:\n",
+ " # Try twice\n",
+ " visits_latiss = client.query(visit_query1).to_pandas()\n",
+ " visits_lsstcc = client.query(visit_query2).to_pandas()\n",
+ " visits_lsstccs = client.query(visit_query3).to_pandas()\n",
"\n",
- " quicklook = consdb.query(quicklook_query).to_pandas()\n",
+ "quicklook = client.query(quicklook_query).to_pandas()\n",
"\n",
- "else:\n",
- " # Assumes at the USDF\n",
- " connection = sqlalchemy.create_engine('postgresql://usdf@usdf-summitdb.slac.stanford.edu/exposurelog')\n",
- " visits_latiss = pd.read_sql(visit_query1, connection)\n",
- " quicklook = pd.read_sql(quicklook_query, connection)\n",
+ "# Assumes at the USDF\n",
+ "#visits_latiss_try = pd.read_sql(visit_query1, connection)\n",
+ "#quicklook_try = pd.read_sql(quicklook_query, connection)\n",
"\n",
"if len(visits_latiss) > 0:\n",
" print(f\"Retrieved {len(visits_latiss)} visits from consdb\")\n",
@@ -256,14 +252,17 @@
]
},
{
- "cell_type": "markdown",
+ "cell_type": "code",
+ "execution_count": null,
"id": "15",
"metadata": {},
+ "outputs": [],
"source": [
- "# Access DDV\n",
- "Summit = https://summit-lsp.lsst.codes/rubintv-dev/ddv/index.html \n",
- "\n",
- "USDF = https://usdf-rsp-dev.slac.stanford.edu/rubintv/ddv/index.html"
+ "usdf = 'https://usdf-rsp-dev.slac.stanford.edu'\n",
+ "service_loc = os.environ.get('EXTERNAL_INSTANCE_URL', usdf)\n",
+ "DDV = f\"{service_loc}/rubintv-dev/ddv/index.html\"\n",
+ "display_markdown('## Access DDV part of RubinTV', raw=True)\n",
+ "DDV"
]
},
{
diff --git a/notebooks_tsqr/consdb/assorted_plots.ipynb b/notebooks_tsqr/consdb/assorted_plots.ipynb
new file mode 100644
index 0000000..48343dd
--- /dev/null
+++ b/notebooks_tsqr/consdb/assorted_plots.ipynb
@@ -0,0 +1,158 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Parameters\n",
+ "day_obs = '2024-06-26'\n",
+ "instruments = 'latiss, lsstcomcamsim, lsstcomcam'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import matplotlib.pyplot as plt\n",
+ "from IPython.display import display_markdown"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "### 'Get Consdb access'\n",
+ "from lsst.summit.utils import ConsDbClient\n",
+ "\n",
+ "URL = \"http://consdb-pq.consdb:8080/consdb\" # Don't use this one\n",
+ "os.environ[\"no_proxy\"] += \",.consdb\"\n",
+ "\n",
+ "client = ConsDbClient(URL)\n",
+ "display_markdown('### Consolidated Database is accessible',raw=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline # After all imports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Put Plot in backend\n",
+ "plt.style.use('seaborn-v0_8-bright')\n",
+ "def plot(y, x):\n",
+ " # plot\n",
+ " fig = plt.figure(figsize=(6, 6))\n",
+ " ax = fig.subplots()\n",
+ " ax.scatter(x, y)\n",
+ "\n",
+ " plt.show()\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "day_obs_int = int(day_obs.replace('-', ''))\n",
+ "instrument_list = [ins.strip() for ins in instruments.split(',')]\n",
+ "\n",
+ "for instrument in instrument_list:\n",
+ " print(\"------------------------------------------------------------\")\n",
+ " print()\n",
+ " display_markdown(f'# Instrument: {instrument}',raw=True)\n",
+ " #################### Put in Backend\n",
+ " ccdvisit1_quicklook = f'''\n",
+ " SELECT * FROM cdb_{instrument}.ccdvisit1_quicklook\n",
+ " '''\n",
+ "\n",
+ " visit1 = f'''\n",
+ " SELECT * FROM cdb_{instrument}.visit1\n",
+ " '''\n",
+ "\n",
+ " # Join Visit1 and ccdVisit1 to access data and day obs\n",
+ " visits = client.query(visit1).to_pandas()\n",
+ " quicklook = client.query(ccdvisit1_quicklook).to_pandas()\n",
+ "\n",
+ " visits = visits.join(quicklook, on='visit_id',lsuffix='',rsuffix='_q')\n",
+ " #################### Put in Backend - end\n",
+ "\n",
+ " # If we see data exist in psf, zero, then we should pare down like visits_today below\n",
+ " try:\n",
+ " visits_w_psf = visits[visits['psf_area'].notna()]\n",
+ " time = visits_w_psf['obs_start']\n",
+ " display_markdown(f'Number of visits with psf_area populated {len(visits_w_psf)}', raw=True)\n",
+ " display_markdown('## psf_area vs obs_start', raw=True)\n",
+ " plot(time, visits_w_psf['psf_area'])\n",
+ " except KeyError as err:\n",
+ " display_markdown(f\"Psf_area not a column in {instrument} dataframe\",raw=True)\n",
+ " display_markdown(f\"key error for {err}\", raw=True)\n",
+ "\n",
+ " try:\n",
+ " visits_w_zero = visits[visits['zero_point'].notna()]\n",
+ " time = visits_w_zero['obs_start']\n",
+ " display_markdown(f'Number of visits with zero_point populated {len(visits_w_zero)}', raw=True)\n",
+ " display_markdown('## zero_point vs obs_start', raw=True)\n",
+ " plot(time, visits_w_zero['zero_point'])\n",
+ " except KeyError as err:\n",
+ " display_markdown(f\"Zero_point not a column in {instrument} dataframe\", raw=True)\n",
+ " display_markdown(f\"key error for {err}\", raw=True)\n",
+ "\n",
+ " # Pare down to only day obs\n",
+ " visits_today = visits[(visits['day_obs'] == day_obs_int)]\n",
+ " display_markdown(f\"How many visits today? {len(visits_today)}\", raw=True)\n",
+ "\n",
+ " ra = visits_today['s_ra']\n",
+ " dec = visits_today['s_dec']\n",
+ " display_markdown(f\"How many ra? {len(ra)}\", raw=True)\n",
+ " display_markdown(f\"How many dec? {len(dec)}\", raw=True)\n",
+ "\n",
+ " display_markdown(f'Ra Dec should be populated for {instrument}', raw=True)\n",
+ " plot(ra, dec)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "LSST",
+ "language": "python",
+ "name": "lsst"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks_tsqr/consdb/assorted_plots.yaml b/notebooks_tsqr/consdb/assorted_plots.yaml
new file mode 100644
index 0000000..f0c9f5e
--- /dev/null
+++ b/notebooks_tsqr/consdb/assorted_plots.yaml
@@ -0,0 +1,18 @@
+# For use with a Times Square notebook
+title: Assorted Plots
+description: Trying plots
+authors:
+ - name: Valerie Becker
+ slack: valerie becker
+tags:
+ - reporting
+ - prototype
+parameters:
+ day_obs:
+ type: string
+ description: Date on which to query
+ default: '2024-06-26'
+ instruments:
+ type: string
+ description: Which instrument to query
+ default: 'latiss, lsstcomcamsim, lsstcomcam'
\ No newline at end of file
diff --git a/notebooks_tsqr/dashboard.ipynb b/notebooks_tsqr/dashboard.ipynb
new file mode 100644
index 0000000..8c08d4b
--- /dev/null
+++ b/notebooks_tsqr/dashboard.ipynb
@@ -0,0 +1,135 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "0",
+ "metadata": {},
+ "source": [
+ "# Logger Source API Dashboard\n",
+ "For all of these to work, the following must be enabled:\n",
+ "- Tucson VPN\n",
+ "- Summit VPN\n",
+ "- User has access to USDF-dev (SLAC)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from pprint import pformat, pp\n",
+ "from datetime import datetime, date, timedelta\n",
+ "from lsst.ts.logging_and_reporting.dashboard import Dashboard\n",
+ "from lsst.ts.logging_and_reporting.reports import md,mdlist"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2",
+ "metadata": {},
+ "source": [
+ "## Connects"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dash = Dashboard()\n",
+ "score, working = dash.report()\n",
+ "# On 9/12/2024 this gets two failed connects. \n",
+ "# The are usdf: exposurelog/instruments,exposurelog/exposures\n",
+ "# When usdfdev if fully functional, there should be zero failed connects (with both VPNs active)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4",
+ "metadata": {},
+ "source": [
+ "## Score"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "md(f'{score=:.0%}')\n",
+ "md('**Servers that are fully functional** for Logging and Reporting:')\n",
+ "mdlist([f'- {w}' for w in working])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6",
+ "metadata": {},
+ "source": [
+ "## Samples"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "server = 'https://summit-lsp.lsst.codes'\n",
+ "samples = dash.get_sample_data(server)\n",
+ "print('One record of data from all endpoints used by LogRep:')\n",
+ "for endpoint,sample in samples.items():\n",
+ " base_ep = endpoint.replace(server,'')\n",
+ " md(f'\\n### Endpoint: {base_ep}')\n",
+ " print(f'{endpoint}')\n",
+ " pp(sample)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8",
+ "metadata": {},
+ "source": [
+ "# Finale"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(f'Finished {str(datetime.now())}')"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks_tsqr/efd.yaml b/notebooks_tsqr/efd.HIDE_yaml
similarity index 100%
rename from notebooks_tsqr/efd.yaml
rename to notebooks_tsqr/efd.HIDE_yaml
diff --git a/notebooks_tsqr/exposurelog.ipynb b/notebooks_tsqr/exposurelog.ipynb
deleted file mode 100644
index 1581b3d..0000000
--- a/notebooks_tsqr/exposurelog.ipynb
+++ /dev/null
@@ -1,739 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "0",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Parameters. Set defaults here.\n",
- "# Times Square replaces this cell with the user's parameters.\n",
- "record_limit = '99'"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "1",
- "metadata": {},
- "source": [
- "\n",
- "## Imports and General Setup"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "2",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Only use packages available in the Rubin Science Platform\n",
- "import requests\n",
- "from collections import defaultdict\n",
- "import pandas as pd\n",
- "from pprint import pp\n",
- "from urllib.parse import urlencode\n",
- "from IPython.display import FileLink, display_markdown\n",
- "from matplotlib import pyplot as plt\n",
- "import os\n",
- "from datetime import datetime, date"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "3",
- "metadata": {},
- "outputs": [],
- "source": [
- "limit = int(record_limit)\n",
- "\n",
- "response_timeout = 3.05 # seconds, how long to wait for connection\n",
- "read_timeout = 20 # seconds\n",
- "timeout = (float(response_timeout), float(read_timeout))\n",
- "\n",
- "summit = 'https://summit-lsp.lsst.codes'\n",
- "usdf = 'https://usdf-rsp-dev.slac.stanford.edu'\n",
- "tucson = 'https://tucson-teststand.lsst.codes'\n",
- "\n",
- "# Use server=tucson for dev testing\n",
- "server = os.environ.get('EXTERNAL_INSTANCE_URL', summit)\n",
- "log = 'exposurelog'\n",
- "service = f'{server}/{log}'\n",
- "service"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "4",
- "metadata": {},
- "source": [
- "\n",
- "## Setup Source"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "5",
- "metadata": {},
- "outputs": [],
- "source": [
- "# For Times Square, comment out next line and past next cell with contents of local python file.\n",
- "#! from lsst.ts.logging_and_reporting.source_adapters import ExposurelogAdapter\n",
- "# Once our logrep package has been installed in RSP, we can use the simpler \"import\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "6",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Paste contents of source_adapters.py here\n",
- "# This file is part of ts_logging_and_reporting.\n",
- "#\n",
- "# Developed for Vera C. Rubin Observatory Telescope and Site Systems.\n",
- "# This product includes software developed by the LSST Project\n",
- "# (https://www.lsst.org).\n",
- "# See the COPYRIGHT file at the top-level directory of this distribution\n",
- "# for details of code ownership.\n",
- "#\n",
- "# This program is free software: you can redistribute it and/or modify\n",
- "# it under the terms of the GNU General Public License as published by\n",
- "# the Free Software Foundation, either version 3 of the License, or\n",
- "# (at your option) any later version.\n",
- "#\n",
- "# This program is distributed in the hope that it will be useful,\n",
- "# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
- "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
- "# GNU General Public License for more details.\n",
- "#\n",
- "# You should have received a copy of the GNU General Public License\n",
- "# along with this program. If not, see .\n",
- "\n",
- "\n",
- "############################################\n",
- "# Python Standard Library\n",
- "from urllib.parse import urlencode\n",
- "import itertools\n",
- "from datetime import datetime\n",
- "import warnings\n",
- "from collections import defaultdict\n",
- "############################################\n",
- "# External Packages\n",
- "import requests\n",
- "\n",
- "\n",
- "MAX_CONNECT_TIMEOUT = 3.1 # seconds\n",
- "MAX_READ_TIMEOUT = 90 * 60 # seconds\n",
- "\n",
- "class ApiAdapter:\n",
- " def __init__(self, *,\n",
- " server_url='https://tucson-teststand.lsst.codes',\n",
- " connect_timeout=3.05, # seconds\n",
- " read_timeout=10 * 60, # seconds\n",
- " ):\n",
- " self.server = server_url\n",
- " self.c_timeout = min(MAX_CONNECT_TIMEOUT,\n",
- " float(connect_timeout)) # seconds\n",
- " self.r_timeout = min(MAX_READ_TIMEOUT, # seconds\n",
- " float(read_timeout))\n",
- " self.timeout = (self.c_timeout, self.r_timeout)\n",
- "\n",
- " # We may be accessing several endpoints of an API.\n",
- " # If so, we will get different types of records for each.\n",
- " # The following are for the \"primary_endpoint\".\n",
- " self.ignore_fields = list()\n",
- " self.categoricals = list()\n",
- " self.foreign_keys = list()\n",
- "\n",
- "\n",
- " def analytics(self, recs, categorical_fields=None):\n",
- " non_cats = set([\n",
- " 'tags', 'urls', 'message_text', 'id', 'date_added',\n",
- " 'obs_id', 'day_obs', 'seq_num', 'parent_id', 'user_id',\n",
- " 'date_invalidated', 'date_begin', 'date_end',\n",
- " 'time_lost', # float\n",
- " # 'systems','subsystems','cscs', # values need special handling\n",
- " ])\n",
- " flds = set(recs[0].keys())\n",
- " if not categorical_fields:\n",
- " categorical_fields = flds\n",
- " ignore_fields = flds - categorical_fields\n",
- " facflds = flds - ignore_fields\n",
- "\n",
- " # facets(field) = set(value-1, value-2, ...)\n",
- " facets = {fld: set([str(r[fld])\n",
- " for r in recs if not isinstance(r[fld], list)])\n",
- " for fld in facflds}\n",
- " return dict(fields=flds,\n",
- " facet_fields=facflds,\n",
- " facets=facets,\n",
- " )\n",
- "\n",
- "\n",
- "class NarrativelogAdapter(ApiAdapter):\n",
- " service = 'narrativelog'\n",
- " primary_endpoint = 'messages'\n",
- " fields = {'category',\n",
- " 'components',\n",
- " 'cscs',\n",
- " 'date_added',\n",
- " 'date_begin',\n",
- " 'date_end',\n",
- " 'date_invalidated',\n",
- " 'id',\n",
- " 'is_human',\n",
- " 'is_valid',\n",
- " 'level',\n",
- " 'message_text',\n",
- " 'parent_id',\n",
- " 'primary_hardware_components',\n",
- " 'primary_software_components',\n",
- " 'site_id',\n",
- " 'subsystems',\n",
- " 'systems',\n",
- " 'tags',\n",
- " 'time_lost',\n",
- " 'time_lost_type',\n",
- " 'urls',\n",
- " 'user_agent',\n",
- " 'user_id'}\n",
- " filters = {\n",
- " 'site_ids',\n",
- " 'message_text', # Message text contain ...\n",
- " 'min_level', # inclusive\n",
- " 'max_level', # exclusive\n",
- " 'user_ids',\n",
- " 'user_agents',\n",
- " 'categories',\n",
- " 'exclude_categories',\n",
- " 'time_lost_types',\n",
- " 'exclude_time_lost_types',\n",
- " 'tags', # at least one must be present.\n",
- " 'exclude_tags', # all must be absent\n",
- " 'systems',\n",
- " 'exclude_systems',\n",
- " 'subsystems',\n",
- " 'exclude_subsystems',\n",
- " 'cscs',\n",
- " 'exclude_cscs',\n",
- " 'components',\n",
- " 'exclude_components',\n",
- " 'primary_software_components',\n",
- " 'exclude_primary_software_components',\n",
- " 'primary_hardware_components',\n",
- " 'exclude_primary_hardware_components',\n",
- " 'urls',\n",
- " 'min_time_lost',\n",
- " 'max_time_lost',\n",
- " 'has_date_begin',\n",
- " 'min_date_begin',\n",
- " 'max_date_begin',\n",
- " 'has_date_end',\n",
- " 'min_date_end',\n",
- " 'max_date_end',\n",
- " 'is_human', # Allowed: either, true, false; Default=either\n",
- " 'is_valid', # Allowed: either, true, false; Default=true\n",
- " 'min_date_added', # inclusive, TAI ISO string, no TZ\n",
- " 'max_date_added', # exclusive, TAI ISO string, no TZ\n",
- " 'has_date_invalidated',\n",
- " 'min_date_invalidated',\n",
- " 'max_date_invalidated',\n",
- " 'has_parent_id',\n",
- " 'order_by',\n",
- " 'offset',\n",
- " 'limit'\n",
- " }\n",
- "\n",
- " def get_messages(self,\n",
- " site_ids=None,\n",
- " message_text=None,\n",
- " min_date_end=None,\n",
- " max_date_end=None,\n",
- " is_human='either',\n",
- " is_valid='either',\n",
- " offset=None,\n",
- " limit=None\n",
- " ):\n",
- " qparams = dict(is_human=is_human, is_valid=is_valid)\n",
- " if site_ids:\n",
- " qparams['site_ids'] = site_ids\n",
- " if message_text:\n",
- " qparams['message_text'] = message_text\n",
- " if min_date_end:\n",
- " qparams['min_date_end'] = min_date_end\n",
- " if max_date_end:\n",
- " qparams['max_date_end'] = max_date_end\n",
- " if limit:\n",
- " qparams['limit'] = limit\n",
- "\n",
- " qstr = urlencode(qparams)\n",
- " url = f'{self.server}/{self.service}/messages?{qstr}'\n",
- " try:\n",
- " recs = requests.get(url, timeout=self.timeout).json()\n",
- " except Exception as err:\n",
- " warnings.warn(f'No {self.service} records retrieved: {err}')\n",
- " recs = []\n",
- " if len(recs) == 0:\n",
- " raise Exception(f'No records retrieved from {url}')\n",
- "\n",
- " self.recs = recs\n",
- " self.recs.sort(key=lambda r: r['date_begin'])\n",
- " return recs\n",
- "\n",
- " def get_timelost(self, rollup='day'):\n",
- " day_tl = dict() # day_tl[day] = totalDayTimeLost\n",
- " for day,dayrecs in itertools.groupby(\n",
- " self.recs,\n",
- " key=lambda r: datetime.fromisoformat(r['date_begin']).date().isoformat()\n",
- " ):\n",
- " day_tl[day] = sum([r['time_lost'] for r in dayrecs])\n",
- " return day_tl\n",
- "\n",
- "class ExposurelogAdapter(ApiAdapter):\n",
- " service = 'exposurelog'\n",
- " primary_endpoint = 'messages'\n",
- " fields = {'date_added',\n",
- " 'date_invalidated',\n",
- " 'day_obs',\n",
- " 'exposure_flag',\n",
- " 'id',\n",
- " 'instrument',\n",
- " 'is_human',\n",
- " 'is_valid',\n",
- " 'level',\n",
- " 'message_text',\n",
- " 'obs_id',\n",
- " 'parent_id',\n",
- " 'seq_num',\n",
- " 'site_id',\n",
- " 'tags',\n",
- " 'urls',\n",
- " 'user_agent',\n",
- " 'user_id'}\n",
- " filters = {\n",
- " 'site_ids',\n",
- " 'obs_id',\n",
- " 'instruments',\n",
- " 'min_day_obs', # inclusive, integer in form YYYMMDD\n",
- " 'max_day_obs', # exclusive, integer in form YYYMMDD\n",
- " 'min_seq_num',\n",
- " 'max_seq_num',\n",
- " 'message_text', # Message text contain ...\n",
- " 'min_level', # inclusive\n",
- " 'max_level', # exclusive\n",
- " 'tags', # at least one must be present.\n",
- " 'urls',\n",
- " 'exclude_tags', # all must be absent\n",
- " 'user_ids',\n",
- " 'user_agents',\n",
- " 'is_human', # Allowed: either, true, false; Default=either\n",
- " 'is_valid', # Allowed: either, true, false; Default=true\n",
- " 'exposure_flags',\n",
- " 'min_date_added', # inclusive, TAI ISO string, no TZ\n",
- " 'max_date_added', # exclusive, TAI ISO string, no TZ\n",
- " 'has_date_invalidated',\n",
- " 'min_date_invalidated',\n",
- " 'max_date_invalidated',\n",
- " 'has_parent_id',\n",
- " 'order_by',\n",
- " 'offset',\n",
- " 'limit'\n",
- " }\n",
- "\n",
- "\n",
- "\n",
- " def get_instruments(self):\n",
- " url = f'{self.server}/{self.service}/instruments'\n",
- " try:\n",
- " instruments = requests.get(url, timeout=self.timeout).json()\n",
- " except Exception as err:\n",
- " warnings.warn(f'No instruments retrieved: {err}')\n",
- " instruments = dict(dummy=[])\n",
- " # Flatten the lists\n",
- " return list(itertools.chain.from_iterable(instruments.values()))\n",
- "\n",
- " def get_exposures(self, instrument, registry=1):\n",
- " qparams = dict(instrument=instrument, registery=registry)\n",
- " url = f'{self.server}/{self.service}/exposures?{urlencode(qparams)}'\n",
- " try:\n",
- " recs = requests.get(url, timeout=self.timeout).json()\n",
- " except Exception as err:\n",
- " warnings.warn(f'No exposures retrieved: {err}')\n",
- " recs = []\n",
- " return recs\n",
- "\n",
- " def get_messages(self,\n",
- " site_ids=None,\n",
- " obs_ids=None,\n",
- " instruments=None,\n",
- " message_text=None,\n",
- " min_day_obs=None,\n",
- " max_day_obs=None,\n",
- " is_human='either',\n",
- " is_valid='either',\n",
- " exposure_flags=None,\n",
- " offset=None,\n",
- " limit=None\n",
- " ):\n",
- " qparams = dict(is_human=is_human, is_valid=is_valid)\n",
- " if site_ids:\n",
- " qparams['site_ids'] = site_ids\n",
- " if obs_ids:\n",
- " qparams['obs_ids'] = obs_ids\n",
- " if instruments:\n",
- " qparams['instruments'] = instruments\n",
- " if min_day_obs:\n",
- " qparams['min_day_obs'] = min_day_obs\n",
- " if max_day_obs:\n",
- " qparams['max_day_obs'] = max_day_obs\n",
- " if exposure_flags:\n",
- " qparams['exposure_flags'] = exposure_flags\n",
- " if offset:\n",
- " qparams['offset'] = offset\n",
- " if limit:\n",
- " qparams['limit'] = limit\n",
- "\n",
- " qstr = urlencode(qparams)\n",
- " url = f'{self.server}/{self.service}/messages?{qstr}'\n",
- " try:\n",
- " recs = requests.get(url, timeout=self.timeout).json()\n",
- " except Exception as err:\n",
- " warnings.warn(f'No {self.service} records retrieved: {err}')\n",
- " recs = []\n",
- " if len(recs) == 0:\n",
- " raise Exception(f'No records retrieved from {url}')\n",
- "\n",
- " self.recs = recs\n",
- " self.recs.sort(key=lambda r: r['day_obs'])\n",
- " return recs\n",
- "\n",
- " def get_observation_gaps(self, instruments=None,\n",
- " min_day_obs=None, # YYYYMMDD\n",
- " max_day_obs=None, # YYYYMMDD\n",
- " ):\n",
- " if not instruments:\n",
- " instruments = self.get_instruments()\n",
- " assert isinstance(instruments,list), \\\n",
- " f'\"instruments\" must be a list. Got {instruments!r}'\n",
- " # inst_day_rollupol[instrument] => dict[day] => exposureGapInMinutes\n",
- " inst_day_rollup = defaultdict(dict) # Instrument/Day rollup\n",
- "\n",
- " for instrum in instruments:\n",
- " recs = self.get_exposures(instrum)\n",
- " instrum_gaps = dict()\n",
- " for day,dayrecs in itertools.groupby(recs,\n",
- " key=lambda r: r['day_obs']):\n",
- " gaps = list()\n",
- " begin = end = None\n",
- " for rec in dayrecs:\n",
- " begin = rec['timespan_begin']\n",
- " if end:\n",
- " # span in minutes\n",
- " diff = (datetime.fromisoformat(begin)\n",
- " - datetime.fromisoformat(end)\n",
- " ).total_seconds() / 60.0\n",
- "\n",
- " gaps.append((\n",
- " datetime.fromisoformat(end).time().isoformat(),\n",
- " datetime.fromisoformat(begin).time().isoformat(),\n",
- " diff\n",
- " ))\n",
- " end = rec['timespan_end']\n",
- " instrum_gaps[day] = gaps\n",
- "\n",
- " #!roll = dict()\n",
- " # Rollup gap times by day\n",
- " for day,tuples in instrum_gaps.items():\n",
- " #!roll[day] = sum([t[2] for t in tuples])\n",
- " inst_day_rollup[instrum][day] = sum([t[2] for t in tuples])\n",
- "\n",
- " return inst_day_rollup"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "7",
- "metadata": {},
- "source": [
- "\n",
- "## Get Records"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "8",
- "metadata": {},
- "outputs": [],
- "source": [
- "service_adapter = ExposurelogAdapter(server_url=server)\n",
- "try:\n",
- " recs = service_adapter.get_messages(limit=limit)\n",
- "except Exception as err:\n",
- " recs = []\n",
- " msg = f'ERROR getting records from {server=}: {err=}'\n",
- " raise Exception(msg)\n",
- "\n",
- "metrics = service_adapter.analytics(recs)\n",
- "flds = metrics['fields']\n",
- "facets = metrics['facets'] # facets(field) = set(value-1, value-2, ...)\n",
- "print(f'Retrieved {len(recs)} records, each with {len(flds)} fields.')"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "9",
- "metadata": {},
- "source": [
- "\n",
- "## Tables of (mostly raw) results"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "10",
- "metadata": {},
- "source": [
- "### Fields names provided in records from log."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "11",
- "metadata": {},
- "outputs": [],
- "source": [
- "pd.DataFrame(flds, columns=['Field Name'])"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "12",
- "metadata": {},
- "source": [
- "### Facets from log records.\n",
- "A *facet* is the set all of values found for a field in the retrieved records. Facets are only calculated for some fields."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "13",
- "metadata": {},
- "outputs": [],
- "source": [
- "display(pd.DataFrame.from_dict(facets, orient='index'))\n",
- "facets"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "14",
- "metadata": {},
- "source": [
- "### Show Retrieved Records"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "15",
- "metadata": {},
- "outputs": [],
- "source": [
- "ignore_fields = ['id']\n",
- "new_column_names = dict(message_text='message',\n",
- " )\n",
- "df = pd.DataFrame(recs).rename(columns=new_column_names)\n",
- "with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n",
- " display(df.loc[:, ~df.columns.isin(ignore_fields)])"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "16",
- "metadata": {},
- "source": [
- "### Table of selected log record fields.\n",
- "Table can be retrieved as CSV file for local use."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "17",
- "metadata": {},
- "outputs": [],
- "source": [
- "cols = ['obs_id', 'user_id', 'user_agent','is_human','is_valid','exposure_flag']\n",
- "df = pd.DataFrame(recs)[cols]\n",
- "\n",
- "# Allow download of CSV version of DataFrame\n",
- "csvfile = 'tl.csv'\n",
- "df.to_csv(csvfile)\n",
- "myfile = FileLink(csvfile)\n",
- "print('Table available as CSV file: ')\n",
- "display(myfile)\n",
- "df"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "18",
- "metadata": {},
- "outputs": [],
- "source": [
- "df = pd.DataFrame(recs)\n",
- "df"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "19",
- "metadata": {},
- "outputs": [],
- "source": [
- "cols = ['obs_id', 'site_id', 'instrument', 'message_text', 'tags','user_id', 'user_agent','is_human','is_valid','exposure_flag']\n",
- "df = pd.DataFrame(recs, columns=None)\n",
- "df"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "20",
- "metadata": {},
- "source": [
- "\n",
- "## Plots from log (per day)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "21",
- "metadata": {},
- "outputs": [],
- "source": [
- "gaps = service_adapter.get_observation_gaps()\n",
- "for instrument, day_gaps in gaps.items():\n",
- " display_markdown(f'### Date vs Observation Gap (minutes) for {instrument=!s}', raw=True)\n",
- " x,y = zip(*day_gaps.items())\n",
- " df = pd.DataFrame(dict(day=x,minutes=y))\n",
- " df.plot.bar(x='day', y='minutes', title=f'{instrument=!s}')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "22",
- "metadata": {},
- "outputs": [],
- "source": [
- "dict(gaps)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "23",
- "metadata": {},
- "source": [
- "\n",
- "## Raw Content Analysis"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "24",
- "metadata": {},
- "source": [
- "### Example of one record"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "25",
- "metadata": {},
- "outputs": [],
- "source": [
- "rec = recs[-1]\n",
- "\n",
- "msg = rec[\"message_text\"]\n",
- "md = f'Message text from log:\\n> {msg}'\n",
- "display_markdown(md, raw=True)\n",
- "\n",
- "md = f'One full record (the last one retrieved):\\n> {rec}'\n",
- "display_markdown(md, raw=True)\n",
- "\n",
- "display(rec)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "26",
- "metadata": {},
- "source": [
- "\n",
- "## Stakeholder Elicitation"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "27",
- "metadata": {},
- "outputs": [],
- "source": [
- "#EXTERNAL_INSTANCE_URL\n",
- "dict(os.environ.items())"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "28",
- "metadata": {},
- "outputs": [],
- "source": [
- "print(f'Finished run: {str(datetime.now())}')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "29",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.10.12"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/notebooks_tsqr/narrativelog.ipynb b/notebooks_tsqr/narrativelog.ipynb
deleted file mode 100644
index b8eb6ae..0000000
--- a/notebooks_tsqr/narrativelog.ipynb
+++ /dev/null
@@ -1,731 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "0",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Parameters. Set defaults here.\n",
- "# Times Square replaces this cell with the user's parameters.\n",
- "record_limit = '99'"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "1",
- "metadata": {},
- "source": [
- "\n",
- "## Imports and General Setup"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "2",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Only use packages available in the Rubin Science Platform\n",
- "import requests\n",
- "from collections import defaultdict\n",
- "import pandas as pd\n",
- "from pprint import pp\n",
- "from urllib.parse import urlencode\n",
- "from IPython.display import FileLink, display_markdown\n",
- "from matplotlib import pyplot as plt\n",
- "import os\n",
- "from datetime import datetime, date"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "3",
- "metadata": {},
- "outputs": [],
- "source": [
- "env = 'usdf_dev' # usdf-dev, tucson, slac, summit\n",
- "log_name = 'narrativelog'\n",
- "log = log_name\n",
- "limit = int(record_limit)\n",
- "response_timeout = 3.05 # seconds, how long to wait for connection\n",
- "read_timeout = 20 # seconds\n",
- "\n",
- "timeout = (float(response_timeout), float(read_timeout))\n",
- "\n",
- "summit = 'https://summit-lsp.lsst.codes'\n",
- "usdf = 'https://usdf-rsp-dev.slac.stanford.edu'\n",
- "tucson = 'https://tucson-teststand.lsst.codes'\n",
- "\n",
- "# Use server = tucson for dev testing\n",
- "server = os.environ.get('EXTERNAL_INSTANCE_URL', summit)\n",
- "\n",
- "service = f'{server}/{log}'\n",
- "service"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "4",
- "metadata": {},
- "source": [
- "\n",
- "## Setup Source"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "5",
- "metadata": {},
- "outputs": [],
- "source": [
- "# For Times Square, comment out next line and past next cell with contents of local python file.\n",
- "#! from lsst.ts.logging_and_reporting.source_adapters import NarrativelogAdapter\n",
- "# Once our logrep package has been installed in RSP, we can use the simpler \"import\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "6",
- "metadata": {},
- "outputs": [],
- "source": [
- "# Paste contents of source_adapters.py here\n",
- "\n",
- "# This file is part of ts_logging_and_reporting.\n",
- "#\n",
- "# Developed for Vera C. Rubin Observatory Telescope and Site Systems.\n",
- "# This product includes software developed by the LSST Project\n",
- "# (https://www.lsst.org).\n",
- "# See the COPYRIGHT file at the top-level directory of this distribution\n",
- "# for details of code ownership.\n",
- "#\n",
- "# This program is free software: you can redistribute it and/or modify\n",
- "# it under the terms of the GNU General Public License as published by\n",
- "# the Free Software Foundation, either version 3 of the License, or\n",
- "# (at your option) any later version.\n",
- "#\n",
- "# This program is distributed in the hope that it will be useful,\n",
- "# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
- "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
- "# GNU General Public License for more details.\n",
- "#\n",
- "# You should have received a copy of the GNU General Public License\n",
- "# along with this program. If not, see .\n",
- "\n",
- "\n",
- "############################################\n",
- "# Python Standard Library\n",
- "from urllib.parse import urlencode\n",
- "import itertools\n",
- "from datetime import datetime\n",
- "import warnings\n",
- "from collections import defaultdict\n",
- "############################################\n",
- "# External Packages\n",
- "import requests\n",
- "\n",
- "\n",
- "MAX_CONNECT_TIMEOUT = 3.1 # seconds\n",
- "MAX_READ_TIMEOUT = 90 * 60 # seconds\n",
- "\n",
- "class ApiAdapter:\n",
- " def __init__(self, *,\n",
- " server_url='https://tucson-teststand.lsst.codes',\n",
- " connect_timeout=3.05, # seconds\n",
- " read_timeout=10 * 60, # seconds\n",
- " ):\n",
- " self.server = server_url\n",
- " self.c_timeout = min(MAX_CONNECT_TIMEOUT,\n",
- " float(connect_timeout)) # seconds\n",
- " self.r_timeout = min(MAX_READ_TIMEOUT, # seconds\n",
- " float(read_timeout))\n",
- " self.timeout = (self.c_timeout, self.r_timeout)\n",
- "\n",
- " # We may be accessing several endpoints of an API.\n",
- " # If so, we will get different types of records for each.\n",
- " # The following are for the \"primary_endpoint\".\n",
- " self.ignore_fields = list()\n",
- " self.categoricals = list()\n",
- " self.foreign_keys = list()\n",
- "\n",
- "\n",
- " def analytics(self, recs, categorical_fields=None):\n",
- " non_cats = set([\n",
- " 'tags', 'urls', 'message_text', 'id', 'date_added',\n",
- " 'obs_id', 'day_obs', 'seq_num', 'parent_id', 'user_id',\n",
- " 'date_invalidated', 'date_begin', 'date_end',\n",
- " 'time_lost', # float\n",
- " # 'systems','subsystems','cscs', # values need special handling\n",
- " ])\n",
- " flds = set(recs[0].keys())\n",
- " if not categorical_fields:\n",
- " categorical_fields = flds\n",
- " ignore_fields = flds - categorical_fields\n",
- " facflds = flds - ignore_fields\n",
- "\n",
- " # facets(field) = set(value-1, value-2, ...)\n",
- " facets = {fld: set([str(r[fld])\n",
- " for r in recs if not isinstance(r[fld], list)])\n",
- " for fld in facflds}\n",
- " return dict(fields=flds,\n",
- " facet_fields=facflds,\n",
- " facets=facets,\n",
- " )\n",
- "\n",
- "\n",
- "class NarrativelogAdapter(ApiAdapter):\n",
- " service = 'narrativelog'\n",
- " primary_endpoint = 'messages'\n",
- " fields = {'category',\n",
- " 'components',\n",
- " 'cscs',\n",
- " 'date_added',\n",
- " 'date_begin',\n",
- " 'date_end',\n",
- " 'date_invalidated',\n",
- " 'id',\n",
- " 'is_human',\n",
- " 'is_valid',\n",
- " 'level',\n",
- " 'message_text',\n",
- " 'parent_id',\n",
- " 'primary_hardware_components',\n",
- " 'primary_software_components',\n",
- " 'site_id',\n",
- " 'subsystems',\n",
- " 'systems',\n",
- " 'tags',\n",
- " 'time_lost',\n",
- " 'time_lost_type',\n",
- " 'urls',\n",
- " 'user_agent',\n",
- " 'user_id'}\n",
- " filters = {\n",
- " 'site_ids',\n",
- " 'message_text', # Message text contain ...\n",
- " 'min_level', # inclusive\n",
- " 'max_level', # exclusive\n",
- " 'user_ids',\n",
- " 'user_agents',\n",
- " 'categories',\n",
- " 'exclude_categories',\n",
- " 'time_lost_types',\n",
- " 'exclude_time_lost_types',\n",
- " 'tags', # at least one must be present.\n",
- " 'exclude_tags', # all must be absent\n",
- " 'systems',\n",
- " 'exclude_systems',\n",
- " 'subsystems',\n",
- " 'exclude_subsystems',\n",
- " 'cscs',\n",
- " 'exclude_cscs',\n",
- " 'components',\n",
- " 'exclude_components',\n",
- " 'primary_software_components',\n",
- " 'exclude_primary_software_components',\n",
- " 'primary_hardware_components',\n",
- " 'exclude_primary_hardware_components',\n",
- " 'urls',\n",
- " 'min_time_lost',\n",
- " 'max_time_lost',\n",
- " 'has_date_begin',\n",
- " 'min_date_begin',\n",
- " 'max_date_begin',\n",
- " 'has_date_end',\n",
- " 'min_date_end',\n",
- " 'max_date_end',\n",
- " 'is_human', # Allowed: either, true, false; Default=either\n",
- " 'is_valid', # Allowed: either, true, false; Default=true\n",
- " 'min_date_added', # inclusive, TAI ISO string, no TZ\n",
- " 'max_date_added', # exclusive, TAI ISO string, no TZ\n",
- " 'has_date_invalidated',\n",
- " 'min_date_invalidated',\n",
- " 'max_date_invalidated',\n",
- " 'has_parent_id',\n",
- " 'order_by',\n",
- " 'offset',\n",
- " 'limit'\n",
- " }\n",
- "\n",
- " def get_messages(self,\n",
- " site_ids=None,\n",
- " message_text=None,\n",
- " min_date_end=None,\n",
- " max_date_end=None,\n",
- " is_human='either',\n",
- " is_valid='either',\n",
- " offset=None,\n",
- " limit=None\n",
- " ):\n",
- " qparams = dict(is_human=is_human, is_valid=is_valid)\n",
- " if site_ids:\n",
- " qparams['site_ids'] = site_ids\n",
- " if message_text:\n",
- " qparams['message_text'] = message_text\n",
- " if min_date_end:\n",
- " qparams['min_date_end'] = min_date_end\n",
- " if max_date_end:\n",
- " qparams['max_date_end'] = max_date_end\n",
- " if limit:\n",
- " qparams['limit'] = limit\n",
- "\n",
- " qstr = urlencode(qparams)\n",
- " url = f'{self.server}/{self.service}/messages?{qstr}'\n",
- " try:\n",
- " recs = requests.get(url, timeout=self.timeout).json()\n",
- " except Exception as err:\n",
- " warnings.warn(f'No {self.service} records retrieved: {err}')\n",
- " recs = []\n",
- " if len(recs) == 0:\n",
- " raise Exception(f'No records retrieved from {url}')\n",
- "\n",
- " self.recs = recs\n",
- " self.recs.sort(key=lambda r: r['date_begin'])\n",
- " return recs\n",
- "\n",
- " def get_timelost(self, rollup='day'):\n",
- " day_tl = dict() # day_tl[day] = totalDayTimeLost\n",
- " for day,dayrecs in itertools.groupby(\n",
- " self.recs,\n",
- " key=lambda r: datetime.fromisoformat(r['date_begin']).date().isoformat()\n",
- " ):\n",
- " day_tl[day] = sum([r['time_lost'] for r in dayrecs])\n",
- " return day_tl\n",
- "\n",
- "class ExposurelogAdapter(ApiAdapter):\n",
- " service = 'exposurelog'\n",
- " primary_endpoint = 'messages'\n",
- " fields = {'date_added',\n",
- " 'date_invalidated',\n",
- " 'day_obs',\n",
- " 'exposure_flag',\n",
- " 'id',\n",
- " 'instrument',\n",
- " 'is_human',\n",
- " 'is_valid',\n",
- " 'level',\n",
- " 'message_text',\n",
- " 'obs_id',\n",
- " 'parent_id',\n",
- " 'seq_num',\n",
- " 'site_id',\n",
- " 'tags',\n",
- " 'urls',\n",
- " 'user_agent',\n",
- " 'user_id'}\n",
- " filters = {\n",
- " 'site_ids',\n",
- " 'obs_id',\n",
- " 'instruments',\n",
- " 'min_day_obs', # inclusive, integer in form YYYMMDD\n",
- " 'max_day_obs', # exclusive, integer in form YYYMMDD\n",
- " 'min_seq_num',\n",
- " 'max_seq_num',\n",
- " 'message_text', # Message text contain ...\n",
- " 'min_level', # inclusive\n",
- " 'max_level', # exclusive\n",
- " 'tags', # at least one must be present.\n",
- " 'urls',\n",
- " 'exclude_tags', # all must be absent\n",
- " 'user_ids',\n",
- " 'user_agents',\n",
- " 'is_human', # Allowed: either, true, false; Default=either\n",
- " 'is_valid', # Allowed: either, true, false; Default=true\n",
- " 'exposure_flags',\n",
- " 'min_date_added', # inclusive, TAI ISO string, no TZ\n",
- " 'max_date_added', # exclusive, TAI ISO string, no TZ\n",
- " 'has_date_invalidated',\n",
- " 'min_date_invalidated',\n",
- " 'max_date_invalidated',\n",
- " 'has_parent_id',\n",
- " 'order_by',\n",
- " 'offset',\n",
- " 'limit'\n",
- " }\n",
- "\n",
- "\n",
- "\n",
- " def get_instruments(self):\n",
- " url = f'{self.server}/{self.service}/instruments'\n",
- " try:\n",
- " instruments = requests.get(url, timeout=self.timeout).json()\n",
- " except Exception as err:\n",
- " warnings.warn(f'No instruments retrieved: {err}')\n",
- " instruments = dict(dummy=[])\n",
- " # Flatten the lists\n",
- " return list(itertools.chain.from_iterable(instruments.values()))\n",
- "\n",
- " def get_exposures(self, instrument, registry=1):\n",
- " qparams = dict(instrument=instrument, registery=registry)\n",
- " url = f'{self.server}/{self.service}/exposures?{urlencode(qparams)}'\n",
- " try:\n",
- " recs = requests.get(url, timeout=self.timeout).json()\n",
- " except Exception as err:\n",
- " warnings.warn(f'No exposures retrieved: {err}')\n",
- " recs = []\n",
- " return recs\n",
- "\n",
- " def get_messages(self,\n",
- " site_ids=None,\n",
- " obs_ids=None,\n",
- " instruments=None,\n",
- " message_text=None,\n",
- " min_day_obs=None,\n",
- " max_day_obs=None,\n",
- " is_human='either',\n",
- " is_valid='either',\n",
- " exposure_flags=None,\n",
- " offset=None,\n",
- " limit=None\n",
- " ):\n",
- " qparams = dict(is_human=is_human, is_valid=is_valid)\n",
- " if site_ids:\n",
- " qparams['site_ids'] = site_ids\n",
- " if obs_ids:\n",
- " qparams['obs_ids'] = obs_ids\n",
- " if instruments:\n",
- " qparams['instruments'] = instruments\n",
- " if min_day_obs:\n",
- " qparams['min_day_obs'] = min_day_obs\n",
- " if max_day_obs:\n",
- " qparams['max_day_obs'] = max_day_obs\n",
- " if exposure_flags:\n",
- " qparams['exposure_flags'] = exposure_flags\n",
- " if offset:\n",
- " qparams['offset'] = offset\n",
- " if limit:\n",
- " qparams['limit'] = limit\n",
- "\n",
- " qstr = urlencode(qparams)\n",
- " url = f'{self.server}/{self.service}/messages?{qstr}'\n",
- " try:\n",
- " recs = requests.get(url, timeout=self.timeout).json()\n",
- " except Exception as err:\n",
- " warnings.warn(f'No {self.service} records retrieved: {err}')\n",
- " recs = []\n",
- " if len(recs) == 0:\n",
- " raise Exception(f'No records retrieved from {url}')\n",
- "\n",
- " self.recs = recs\n",
- " self.recs.sort(key=lambda r: r['day_obs'])\n",
- " return recs\n",
- "\n",
- " def get_observation_gaps(self, instruments=None,\n",
- " min_day_obs=None, # YYYYMMDD\n",
- " max_day_obs=None, # YYYYMMDD\n",
- " ):\n",
- " if not instruments:\n",
- " instruments = self.get_instruments()\n",
- " assert isinstance(instruments,list), \\\n",
- " f'\"instruments\" must be a list. Got {instruments!r}'\n",
- " # inst_day_rollupol[instrument] => dict[day] => exposureGapInMinutes\n",
- " inst_day_rollup = defaultdict(dict) # Instrument/Day rollup\n",
- "\n",
- " for instrum in instruments:\n",
- " recs = self.get_exposures(instrum)\n",
- " instrum_gaps = dict()\n",
- " for day,dayrecs in itertools.groupby(recs,\n",
- " key=lambda r: r['day_obs']):\n",
- " gaps = list()\n",
- " begin = end = None\n",
- " for rec in dayrecs:\n",
- " begin = rec['timespan_begin']\n",
- " if end:\n",
- " # span in minutes\n",
- " diff = (datetime.fromisoformat(begin)\n",
- " - datetime.fromisoformat(end)\n",
- " ).total_seconds() / 60.0\n",
- "\n",
- " gaps.append((\n",
- " datetime.fromisoformat(end).time().isoformat(),\n",
- " datetime.fromisoformat(begin).time().isoformat(),\n",
- " diff\n",
- " ))\n",
- " end = rec['timespan_end']\n",
- " instrum_gaps[day] = gaps\n",
- "\n",
- " #!roll = dict()\n",
- " # Rollup gap times by day\n",
- " for day,tuples in instrum_gaps.items():\n",
- " #!roll[day] = sum([t[2] for t in tuples])\n",
- " inst_day_rollup[instrum][day] = sum([t[2] for t in tuples])\n",
- "\n",
- " return inst_day_rollup\n",
- "\n",
- "\n",
- "\n",
- "# gaps,recs = logrep_utils.ExposurelogAdapter(server_url='https://usdf-rsp-dev.slac.stanford.edu').get_observation_gaps('LSSTComCam')\n",
- "\n",
- "# gaps,recs = logrep_utils.ExposurelogAdapter(server_url='[[https://tucson-teststand.lsst.codes').get_observation_gaps('LSSTComCam')\n"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "7",
- "metadata": {},
- "source": [
- "\n",
- "## Get Records"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "8",
- "metadata": {},
- "outputs": [],
- "source": [
- "service_adapter = NarrativelogAdapter(server_url=server)\n",
- "try:\n",
- " recs = service_adapter.get_messages()\n",
- "except Exception as err:\n",
- " recs = []\n",
- " msg = f'ERROR getting records from {server=}: {err=}'\n",
- " raise Exception(msg)\n",
- "\n",
- "metrics = service_adapter.analytics(recs)\n",
- "flds = metrics['fields']\n",
- "facets = metrics['facets'] # facets(field) = set(value-1, value-2, ...)\n",
- "print(f'Retrieved {len(recs)} records, each with {len(flds)} fields.')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "9",
- "metadata": {},
- "outputs": [],
- "source": [
- "date_tl = service_adapter.get_timelost()"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "10",
- "metadata": {},
- "source": [
- "\n",
- "## Tables of (mostly raw) results"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "11",
- "metadata": {},
- "source": [
- "### Fields names provided in records from log."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "12",
- "metadata": {},
- "outputs": [],
- "source": [
- "pd.DataFrame(flds, columns=['Field Name'])"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "13",
- "metadata": {},
- "source": [
- "### Facets from log records.\n",
- "A *facet* is the set all of values found for a field in the retrieved records. Facets are only calculated for some fields."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "14",
- "metadata": {},
- "outputs": [],
- "source": [
- "display(pd.DataFrame.from_dict(facets, orient='index'))\n",
- "facets"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "15",
- "metadata": {},
- "source": [
- "### Show Retrieved Records"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "16",
- "metadata": {},
- "outputs": [],
- "source": [
- "ignore_fields = ['id']\n",
- "new_column_names = dict(message_text='message',\n",
- " primary_software_components='PSC',\n",
- " primary_hardware_components='PHC',\n",
- " )\n",
- "\n",
- "df = pd.DataFrame(recs).rename(columns=new_column_names)\n",
- "with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n",
- " display(df.loc[:, ~df.columns.isin(ignore_fields)])"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "17",
- "metadata": {},
- "source": [
- "### Table of selected log record fields.\n",
- "Table can be retrieved as CSV file for local use."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "18",
- "metadata": {},
- "outputs": [],
- "source": [
- "cols = ['date_added', 'time_lost', 'time_lost_type']\n",
- "df = pd.DataFrame(recs)[cols]\n",
- "\n",
- "# Allow download of CSV version of DataFrame\n",
- "csvfile = 'tl.csv'\n",
- "df.to_csv(csvfile)\n",
- "myfile = FileLink(csvfile)\n",
- "print('Table available as CSV file: ')\n",
- "display(myfile)\n",
- "df"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "19",
- "metadata": {},
- "outputs": [],
- "source": [
- "cols = ['message_text','tags','user_id', 'components','date_end']\n",
- "df = pd.DataFrame(recs, columns=cols)\n",
- "df"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "20",
- "metadata": {},
- "source": [
- "\n",
- "## Plots from log (per day)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "21",
- "metadata": {},
- "outputs": [],
- "source": [
- "display_markdown(f'### Date vs Time Lost', raw=True)\n",
- "x = date_tl.keys()\n",
- "y = date_tl.values()\n",
- "\n",
- "df = pd.DataFrame(dict(day=x,time_lost=y))\n",
- "df.plot.bar(x='day', y='time_lost', title=f'Time Lost')\n",
- "display(df)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "22",
- "metadata": {},
- "source": [
- "\n",
- "## Raw Content Analysis"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "23",
- "metadata": {},
- "source": [
- "### Example of one record"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "24",
- "metadata": {},
- "outputs": [],
- "source": [
- "rec = recs[-1]\n",
- "\n",
- "msg = rec[\"message_text\"]\n",
- "md = f'Message text from log:\\n> {msg}'\n",
- "display_markdown(md, raw=True)\n",
- "\n",
- "md = f'One full record (the last one retrieved):\\n> {rec}'\n",
- "display_markdown(md, raw=True)\n",
- "\n",
- "display(rec)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "25",
- "metadata": {},
- "source": [
- "\n",
- "## Stakeholder Elicitation"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "26",
- "metadata": {},
- "outputs": [],
- "source": [
- "print(f'Finished run: {str(datetime.now())}')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "27",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.10.12"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/notebooks_tsqr/sources_dashboard.ipynb b/notebooks_tsqr/sources_dashboard.ipynb
deleted file mode 100644
index eb05fca..0000000
--- a/notebooks_tsqr/sources_dashboard.ipynb
+++ /dev/null
@@ -1,74 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "0",
- "metadata": {},
- "outputs": [],
- "source": [
- "from lsst.ts.logging_and_reporting.source_adapters import Dashboard"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "1",
- "metadata": {},
- "outputs": [],
- "source": [
- "status = Dashboard().report(timeout=0.5)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "2",
- "metadata": {},
- "outputs": [],
- "source": [
- "status"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "3",
- "metadata": {},
- "outputs": [],
- "source": [
- "successes = [k for k,v in status.items() if v == 200]\n",
- "successes"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "4",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.10.12"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/python/lsst/ts/logging_and_reporting/almanac.py b/python/lsst/ts/logging_and_reporting/almanac.py
new file mode 100644
index 0000000..a527629
--- /dev/null
+++ b/python/lsst/ts/logging_and_reporting/almanac.py
@@ -0,0 +1,81 @@
+from datetime import datetime, date, time, timedelta
+import math
+from astroplan import Observer
+from astropy.time import Time
+
+class Almanac:
+
+ def __init__(self, *, day_obs=None, site='Rubin'):
+ if day_obs is None:
+ astro_day = date.today() - timedelta(days=1)
+ else:
+ astro_day = datetime.strptime(str(day_obs), '%Y%m%d').date()
+
+ self.observer = Observer.at_site(site, timezone='Chile/Continental')
+ self.astro_day = astro_day
+ self.astro_noon = datetime.combine(self.astro_day,time(12))
+
+ self.get_moon()
+ self.get_sun()
+
+ def get_moon(self):
+ self.moon_rise_time = self.observer.moon_rise_time(self.astro_noon)
+ self.moon_set_time = self.observer.moon_set_time(self.astro_noon)
+
+ # Percent of moon lit
+ self.moon_illum = self.observer.moon_illumination(self.astro_noon)
+
+ def get_sun(self):
+ # Avoid shadowing datetime.time (imported above) with a local "time".
+ obs_time = self.observer.datetime_to_astropy_time(self.astro_noon)
+
+ # ast(ronomical) twilight: -18 degrees
+ self.ast_twilight_morning = self.observer.twilight_morning_astronomical(
+ obs_time)
+ self.ast_twilight_evening = self.observer.twilight_evening_astronomical(
+ obs_time)
+
+ # nau(tical) twilight: -12 degrees
+ self.nau_twilight_morning = self.observer.twilight_morning_nautical(
+ obs_time)
+ self.nau_twilight_evening = self.observer.twilight_evening_nautical(
+ obs_time)
+
+ # civ(il) twilight: -6 degrees
+ self.civ_twilight_morning = self.observer.twilight_morning_civil(
+ obs_time)
+ self.civ_twilight_evening = self.observer.twilight_evening_civil(
+ obs_time)
+
+ self.sun_rise_time = self.observer.sun_rise_time(obs_time)
+ self.sun_set_time = self.observer.sun_set_time(obs_time)
+
+ @property
+ def as_dict(self):
+ data_dict = {
+ 'Moon Rise': self.moon_rise_time.iso,
+ 'Moon Set': self.moon_set_time.iso,
+ 'Moon Illumination': f'{self.moon_illum:.0%}',
+
+ 'Astronomical Twilight (morning)': self.ast_twilight_morning.iso,
+ 'Astronomical Twilight (evening)': self.ast_twilight_evening.iso,
+ 'Nautical Twilight (morning)': self.nau_twilight_morning.iso,
+ 'Nautical Twilight (evening)': self.nau_twilight_evening.iso,
+ 'Civil Twilight (morning)': self.civ_twilight_morning.iso,
+ 'Civil Twilight (evening)': self.civ_twilight_evening.iso,
+ 'Sun Rise': self.sun_rise_time.iso,
+ 'Sun Set': self.sun_set_time.iso,
+ }
+ help_dict = {
+ 'Moon Rise': '',
+ 'Moon Set': '',
+ 'Moon Illumination': '(% lit)',
+ 'Astronomical Twilight (morning)': '(-18 degrees)',
+ 'Astronomical Twilight (evening)': '(-18 degrees)',
+ 'Nautical Twilight (morning)': '(-12 degrees)',
+ 'Nautical Twilight (evening)': '(-12 degrees)',
+ 'Civil Twilight (morning)': '(-6 degrees)',
+ 'Civil Twilight (evening)': '(-6 degrees)',
+ 'Sun Rise': '',
+ 'Sun Set': '',
+ }
+ return data_dict, help_dict
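For local sanity-checking of the new Almanac class, a minimal sketch (not part of the patch; it assumes astroplan and astropy are installed and that the default 'Rubin' site name resolves in astroplan's site registry):

from lsst.ts.logging_and_reporting.almanac import Almanac

almanac = Almanac(day_obs=20240904)  # YYYYMMDD; defaults to yesterday if omitted
data, notes = almanac.as_dict        # as_dict returns (values, help-text) dicts
for key, value in data.items():
    print(f'{key:35} {value:25} {notes[key]}')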
diff --git a/python/lsst/ts/logging_and_reporting/dashboard.py b/python/lsst/ts/logging_and_reporting/dashboard.py
new file mode 100644
index 0000000..dddae68
--- /dev/null
+++ b/python/lsst/ts/logging_and_reporting/dashboard.py
@@ -0,0 +1,100 @@
+# Python Standard Library
+from collections import defaultdict
+from warnings import warn
+import lsst.ts.logging_and_reporting.source_adapters as sad
+# External Packages
+import requests
+
+
+class Dashboard: # TODO Move to its own file (utils.py).
+ """Verify that we can get to all the API endpoints and databases we need for
+ any of our sources.
+ """
+ timeout = 0.8
+
+ envs = dict( # key, server
+ summit = 'https://summit-lsp.lsst.codes',
+ usdf_dev = 'https://usdf-rsp-dev.slac.stanford.edu',
+ tucson = 'https://tucson-teststand.lsst.codes',
+ # Environments not currently used:
+ # rubin_usdf_dev = '',
+ # data_lsst_cloud = '',
+ # usdf = '',
+ # base_data_facility = '',
+ # rubin_idf_int = '',
+ )
+ adapters = [sad.ExposurelogAdapter,
+ sad.NarrativelogAdapter,
+ sad.NightReportAdapter,
+ ]
+
+ def get_sample_data(self, server):
+
+ samples = defaultdict(dict) # samples[endpoint_url] -> one_record_dict
+ for adapter in self.adapters:
+ sa = adapter(server_url=server, limit=1)
+ for ep in sa.endpoints:
+ qstr = '?instrument=LSSTComCamSim' if ep == 'exposures' else ''
+ url = f'{server}/{sa.service}/{ep}{qstr}'
+ try:
+ res = requests.get(url, timeout=self.timeout)
+ recs = res.json()
+ if isinstance(recs, dict):
+ samples[url] = recs
+ else:
+ samples[url] = recs[0]
+ except Exception as err:
+ # NB: res is unbound when the GET itself raises (e.g. timeout),
+ # so report only the exception.
+ warn(f'Could not get data from {url}: {err=}')
+ samples[url] = None
+ return dict(samples)
+
+
+ def report(self, timeout=None):
+ """Check our ability to connect to every Source on every Environment.
+ Report a summary.
+
+ RETURN: (fraction of good connections, set of servers passing all checks).
+ """
+ url_status = dict() # url_status[endpoint_url] = http _status_code
+ working = set() # Set of servers that work for all our required endpoints.
+
+ for env,server in self.envs.items():
+ server_all_good = True
+ for adapter in self.adapters:
+ service = adapter(server_url=server)
+ stats, adapter_all_good = service.check_endpoints(timeout=timeout)
+ url_status.update(stats)
+ server_all_good &= adapter_all_good
+ if server_all_good:
+ working.add(server)
+
+ total_cnt = good_cnt = 0
+ good = list()
+ bad = list()
+ for url,stat in url_status.items():
+ total_cnt += 1
+ if stat == 200:
+ good_cnt += 1
+ good.append(url)
+ else:
+ bad.append((url,stat))
+
+ print(f'\nConnected to {good_cnt} out of {total_cnt} endpoints.'
+ f' ({good_cnt/total_cnt:.0%})'
+ )
+ print(f'Successful connects ({good_cnt}): ')
+ for gurl in good:
+ print(f'\t{gurl}')
+
+ print(f'Failed connects ({total_cnt - good_cnt}): ')
+ for burl,stat in bad:
+ print(f'\t{stat}: {burl}')
+
+ status = dict(num_good=good_cnt,
+ num_total=total_cnt,
+ good_urls=good,
+ bad_urls=bad,
+ )
+ return good_cnt/total_cnt, working
+# END: class Dashboard
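A hedged smoke-test sketch for the Dashboard (not part of the patch; which endpoints respond depends on the network you run it from, e.g. Summit vs. USDF):

from lsst.ts.logging_and_reporting.dashboard import Dashboard

fraction_good, working_servers = Dashboard().report(timeout=0.5)
print(f'{fraction_good:.0%} of known endpoints responded with HTTP 200')
print(f'Servers passing every check: {sorted(working_servers) or "none"}')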
diff --git a/python/lsst/ts/logging_and_reporting/exceptions.py b/python/lsst/ts/logging_and_reporting/exceptions.py
new file mode 100644
index 0000000..07fe44f
--- /dev/null
+++ b/python/lsst/ts/logging_and_reporting/exceptions.py
@@ -0,0 +1,78 @@
+# This file is part of ts_logging_and_reporting.
+#
+# Developed for Vera C. Rubin Observatory Telescope and Site Systems.
+# This product includes software developed by the LSST Project
+# (https://www.lsst.org).
+# See the COPYRIGHT file at the top-level directory of this distribution
+# for details of code ownership.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# error_code values should be no bigger than 8 characters 12345678
+import traceback
+from warnings import warn
+
+class BaseLogrepException(Exception):
+ is_an_error_response = True
+ status_code = 400
+ error_message = ''
+ saved_tb = None
+
+ def get_subclass_name(self):
+ return self.__class__.__name__
+
+ def __init__(self, error_message, error_code=None, status_code=None):
+ Exception.__init__(self)
+ self.error_message = error_message
+ if error_code is not None:
+ assert len(error_code) <= 8, f'error_code "{error_code}" too big'
+ self.error_code = error_code
+ if status_code is not None:
+ self.status_code = status_code
+
+ self.saved_tb = traceback.format_exc()
+
+ def __str__(self):
+ return (f'[{self.error_code}] {self.error_message}'
+ f' {self.saved_tb=}')
+
+ def to_dict(self):
+ dd = dict(errorMessage=self.error_message,
+ errorCode=self.error_code,
+ #! trace=self.saved_tb,
+ statusCode=self.status_code)
+ return dd
+
+
+example_error_from_exposurelog = {
+ 'detail': [
+ {'type': 'int_parsing',
+ 'loc': ['query', 'min_day_obs'],
+ 'msg': 'Input should be a valid integer, unable to parse string as an integer',
+ 'input': '2024-08-19'},
+ {'type': 'int_parsing',
+ 'loc': ['query', 'max_day_obs'],
+ 'msg': 'Input should be a valid integer, unable to parse string as an integer',
+ 'input': '2024-09-21'}]}
+
+
+class BadStatus(BaseLogrepException):
+ """Non-200 HTTP status from API endpoint. Typically
+ this will occur when a URL query string parameter is passed a value with
+ a bad format. It may also be that the Service is broken.
+ """
+ error_code = 'BADQSTR'
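To illustrate the intended exception protocol, a small sketch (the message text is made up; 'BADQSTR' is the class default shown above):

from lsst.ts.logging_and_reporting.exceptions import BadStatus

try:
    raise BadStatus('min_day_obs must be an integer in YYYYMMDD form',
                    status_code=422)
except BadStatus as err:
    print(err.get_subclass_name())  # BadStatus
    print(err.to_dict())
    # {'errorMessage': '...', 'errorCode': 'BADQSTR', 'statusCode': 422}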
diff --git a/python/lsst/ts/logging_and_reporting/reports.py b/python/lsst/ts/logging_and_reporting/reports.py
new file mode 100644
index 0000000..ef63418
--- /dev/null
+++ b/python/lsst/ts/logging_and_reporting/reports.py
@@ -0,0 +1,135 @@
+# This file is part of ts_logging_and_reporting.
+#
+# Developed for Vera C. Rubin Observatory Telescope and Site Systems.
+# This product includes software developed by the LSST Project
+# (https://www.lsst.org).
+# See the COPYRIGHT file at the top-level directory of this distribution
+# for details of code ownership.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# Python Standard Library
+from urllib.parse import urlencode
+import itertools
+from datetime import datetime, date, time, timedelta
+from warnings import warn
+from collections import defaultdict
+from abc import ABC
+# External Packages
+import requests
+from IPython.display import display, Markdown
+import pandas as pd
+# Local Packages
+import lsst.ts.logging_and_reporting.almanac as alm
+
+def md(markdown_str, color=None):
+ # see https://www.w3schools.com/colors/colors_names.asp
+ if color:
+ display(Markdown(f"{markdown_str}"))
+ else:
+ display(Markdown(markdown_str))
+
+def mdlist(markdown_list, color=None):
+ if markdown_list is None:
+ return
+
+ for markdown_str in markdown_list:
+ md(markdown_str, color=color)
+
+
+def dict_to_md(in_dict):
+ md_list = list()
+ for key, content_list in in_dict.items():
+ md_list.append(f'- {key}')
+ for elem in content_list:
+ md_list.append(f' - {elem}')
+ return md_list
+
+def adapter_overview(adapter, status, limit):
+ count = status["number_of_records"]
+ error = status["error"]
+ more = '(There may be more.)' if count >= limit else ''
+ result = error if error else f'Got {count} records. '
+ mdlist([f'## Overview for Service: `{adapter.service}` [{count}]',
+ f'- Endpoint: {status["endpoint_url"]}',
+ ])
+ print(f'- {result} {more}')
+
+
+# TODO move all instances of "row_header", "row_str_func" from source_adapters to here.
+class Report(ABC):
+ def __init__(self, *,
+ min_day_obs=None, # INCLUSIVE: default=Yesterday
+ max_day_obs=None, # EXCLUSIVE: default=Today
+ ):
+ self.min_day_obs = min_day_obs
+ self.max_day_obs = max_day_obs
+
+ def time_log_as_markdown(self, source_adapter, url,
+ log_title=None,
+ zero_message=False,
+ ):
+ records = source_adapter.records
+ service = source_adapter.service
+ title = log_title if log_title else ''
+ if records:
+ md(f'### {title}')
+ table = source_adapter.day_table('date_added')
+ mdlist(table)
+ else:
+ if zero_message:
+ md(f'No {service} records found.', color='lightblue')
+ md(f'Used [API Data]({url})')
+
+class AlmanacReport(Report):
+ # moon rise,set,illumination %
+ # (astronomical,nautical,civil) twilight (morning,evening)
+ # sun rise,set
+ def almanac_as_dataframe(self):
+ # This displays a superfluous header row ("0, 1") from the (data, help) tuple.
+ return pd.DataFrame(alm.Almanac().as_dict).T
+
+
+class NightlyLogReport(Report):
+
+ def block_tickets_as_markdown(self, tickets,
+ title='## Nightly Jira BLOCKs',
+ front=''):
+ # tickets[day_obs] = {ticket_url, ...}
+ # front: URL prefix to strip from the displayed link text.
+ mdstr = ''
+ if title:
+ mdstr += title
+
+ for day, url_list in tickets.items():
+ mdstr += f'\n- {day}'
+ for ticket_url in url_list:
+ mdstr += f'\n - [{ticket_url.replace(front,"")}]({ticket_url})'
+ return mdstr
+
+
+class ExposurelogReport(Report):
+
+ # date, time, obs_id, message_text
+ def time_log_as_markown(self, records,
+ title='# Exposure Log'):
+ pass # TODO use "day_table"
+
+ def daily_observation_gap(self, min_day_obs, max_day_obs):
+ pass
+
+class NarrativelogReport(Report):
+
+ # date, time, message_text
+ def time_log_as_markown(self, records,
+ title='# Narrative Log'):
+ pass # TODO use "day_table"
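A sketch of how the markdown helpers in reports.py compose inside a notebook cell (hypothetical data; md/mdlist render through IPython.display, so visible output requires Jupyter):

from lsst.ts.logging_and_reporting.reports import md, mdlist, dict_to_md

md('## Example Section', color='lightblue')
tickets = {'2024-09-03': ['BLOCK-123', 'BLOCK-124'],
           '2024-09-04': ['BLOCK-125']}
mdlist(dict_to_md(tickets))  # two-level bullet list, one md() call per line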
diff --git a/python/lsst/ts/logging_and_reporting/source_adapters.py b/python/lsst/ts/logging_and_reporting/source_adapters.py
index 8679dbc..cc32d3f 100644
--- a/python/lsst/ts/logging_and_reporting/source_adapters.py
+++ b/python/lsst/ts/logging_and_reporting/source_adapters.py
@@ -20,92 +20,171 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-############################################
+'''\
+TODO: This is considered Proof of Concept code.
+Tests and documentation exist minimally or not at all since, until the
+concept is proven, it all might be thrown away or rewritten.
+'''
+
# Python Standard Library
from urllib.parse import urlencode
import itertools
-from datetime import datetime
+from datetime import datetime, date, time, timedelta
from warnings import warn
from collections import defaultdict
from abc import ABC
-
-############################################
# External Packages
import requests
+# Local Packages
+import lsst.ts.logging_and_reporting.utils as ut
+import lsst.ts.logging_and_reporting.exceptions as ex
-MAX_CONNECT_TIMEOUT = 3.1 # seconds
-MAX_READ_TIMEOUT = 90 * 60 # seconds
-def keep_fields(outfields, recs):
- """Keep only keys in OUTFIELDS list of RECS (list of dicts)
- SIDE EFFECT: Removes extraneous keys from all dicts in RECS.
- """
- if outfields:
- for rec in recs:
- nukefields = set(rec.keys()) - set(outfields)
- print(f'{rec=} {nukefields=}')
- for f in nukefields:
- del rec[f]
+MAX_CONNECT_TIMEOUT = 3.1 # seconds
+MAX_READ_TIMEOUT = 180 # seconds
+
+
+def all_endpoints(server):
+ endpoints = itertools.chain.from_iterable(
+ [sa(server_url=server).used_endpoints() for sa in adapters]
+ )
+ return list(endpoints)
+
+
+def validate_response(response, endpoint_url):
+ if response.status_code == 200:
+ return True
+ else:
+ msg = f'Error: {response.json()} {endpoint_url=} {response.reason}'
+ raise ex.BadStatus(msg)
+
+
class SourceAdapter(ABC):
"""Abstract Base Class for all source adapters.
"""
# TODO document class including all class variables.
def __init__(self, *,
server_url='https://tucson-teststand.lsst.codes',
+ min_day_obs=None, # INCLUSIVE: default=Yesterday
+ max_day_obs=None, # EXCLUSIVE: default=Today other=YYYY-MM-DD
+ limit=99,
+ offset=0,
connect_timeout=1.05, # seconds
read_timeout=2, # seconds
):
+ if min_day_obs is None: # Inclusive
+ min_day_obs = ut.datetime_to_day_obs(
+ datetime.today() - timedelta(days=1))
+ if max_day_obs is None: # Exclusive
+ max_day_obs = ut.datetime_to_day_obs(
+ datetime.today() + timedelta(days=1))
self.server = server_url
+ self.min_day_obs = min_day_obs
+ self.max_day_obs = max_day_obs
+ self.min_date = ut.get_datetime_from_day_obs_str(min_day_obs)
+ self.max_date = ut.get_datetime_from_day_obs_str(max_day_obs)
+ self.limit = limit
+ self.offset = offset
self.c_timeout = min(MAX_CONNECT_TIMEOUT,
float(connect_timeout)) # seconds
self.r_timeout = min(MAX_READ_TIMEOUT, # seconds
float(read_timeout))
self.timeout = (self.c_timeout, self.r_timeout)
+ self.records = None # else: list of dict
# Provide the following in subclass
output_fields = None
service = None
endpoints = None
+
+
+ def keep_fields(self, recs, outfields):
+ """Keep only keys in OUTFIELDS list of RECS (list of dicts)
+ SIDE EFFECT: Removes extraneous keys from all dicts in RECS.
+ """
+ if outfields:
+ for rec in recs:
+ nukefields = set(rec.keys()) - set(outfields)
+ for f in nukefields:
+ del rec[f]
+
+ @property
+ def row_header(self):
+ return '| Time | Message |\n|--------|------|'
+
+ def row_str_func(self, datetime_str, rec):
+ msg = rec['message_text']
+ return f'> {datetime_str} | {msg}'
+
+
+ # Break on DAY_OBS. Within that, break on DATE, within that only show time.
+ def day_table(self, datetime_field,
+ dayobs_field=None,
+ row_str_func=None,
+ zero_message=False,
+ ):
+ def obs_night(rec):
+ if 'day_obs' in rec:
+ return ut.day_obs_str(rec['day_obs']) # -> # "YYYY-MM-DD"
+ else:
+ dt = datetime.fromisoformat(rec[datetime_field])
+ return ut.datetime_to_day_obs(dt)
+
+ def obs_date(rec):
+ dt = datetime.fromisoformat(rec[datetime_field])
+ return dt.replace(microsecond=0)
+
+ recs = self.records
+ if len(recs) == 0:
+ if zero_message:
+ print('Nothing to display.')
+ return
+ dates = set([obs_date(r).date() for r in recs])
+ table = list()
+ # Group by night.
+ recs = sorted(recs,key=lambda r: obs_night(r))
+ for night,g0 in itertools.groupby(recs, key=lambda r: obs_night(r)):
+ # Group by date
+ table.append(f'## NIGHT: {night}: ')
+ for date,g1 in itertools.groupby(g0, key=lambda r: obs_date(r).date()):
+ table.append(f'### DATE: {date}: ')
+ for rec in g1:
+ dt = obs_date(rec)
+ dtstr = str(dt.time())
+ table.append(f'{self.row_str_func(dtstr, rec)}')
+ table.append(':EOT')
+ return table
+
+
@property
def source_url(self):
return f'{self.server}/{self.service}'
-
- def check_endpoints(self, timeout=None):
- to = (timeout or self.timeout)
- print(f'Try connect to each endpoint of {self.server}/{self.service} '
- f'using timeout={to}.')
- url_http_status_code = dict()
+ def used_endpoints(self):
+ used = list()
for ep in self.endpoints:
- url = f'{self.server}/{self.service}/{ep}'
- try:
- r = requests.get(url, timeout=(timeout or self.timeout))
- except:
- url_http_status_code[url] = 'timeout'
- else:
- url_http_status_code[url] = r.status_code
- return url_http_status_code
-
- service = None
- endpoints = None
+ used.append(f'{self.server}/{self.service}/{ep}')
+ return used
- def check_endpoints(self, timeout=None):
+ def check_endpoints(self, timeout=None, verbose=True):
to = (timeout or self.timeout)
- print(f'Try connect to each endpoint of {self.server}/{self.service} '
- f'using timeout={to}.')
+ if verbose:
+ print(f'Try connect to each endpoint of'
+ f' {self.server}/{self.service} ')
url_http_status_code = dict()
for ep in self.endpoints:
url = f'{self.server}/{self.service}/{ep}'
try:
r = requests.get(url, timeout=(timeout or self.timeout))
- except:
- url_http_status_code[url] = 'timeout'
+ validate_response(r, url)
+ except Exception as err:
+ url_http_status_code[url] = 'GET error'
else:
url_http_status_code[url] = r.status_code
- return url_http_status_code
+ return url_http_status_code, all([v==200 for v in url_http_status_code.values()])
def analytics(self, recs, categorical_fields=None):
@@ -135,6 +214,7 @@ def analytics(self, recs, categorical_fields=None):
facet_fields=facflds,
facets=facets,
)
+# END: class SourceAdapter
# Not available on SLAC (usdf) as of 9/9/2024.
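The day_table method above leans on the sort-then-groupby idiom (by observing night, then by date). A standalone sketch of that pattern over fabricated records, since itertools.groupby only merges adjacent items:

import itertools
from datetime import datetime

recs = [
    {'date_added': '2024-09-04T01:10:00'},
    {'date_added': '2024-09-05T00:05:00'},
    {'date_added': '2024-09-04T02:30:00'},
]
day = lambda r: datetime.fromisoformat(r['date_added']).date().isoformat()
recs.sort(key=day)  # required: groupby only merges adjacent equal keys
for night, group in itertools.groupby(recs, key=day):
    print(f'NIGHT {night}:')
    for rec in group:
        print(f'  {rec["date_added"]}')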
@@ -142,136 +222,166 @@ class NightReportAdapter(SourceAdapter):
service = "nightreport"
endpoints = ['reports']
primary_endpoint = 'reports'
+ outfields = {
+ 'confluence_url',
+ 'date_added',
+ 'date_invalidated',
+ 'date_sent',
+ 'day_obs',
+ 'id',
+ 'is_valid',
+ 'observers_crew',
+ 'parent_id',
+ 'site_id',
+ 'summary',
+ 'telescope',
+ 'telescope_status',
+ 'user_agent',
+ 'user_id',
+ }
+
+ def row_str_func(self, datetime_str, rec):
+ return f"> {datetime_str} | {rec['summary']}
"
+
+ def get_reports(self,
+ site_ids=None,
+ summary=None,
+ is_human='either',
+ is_valid='either',
+ ):
+ qparams = dict(is_human=is_human, is_valid=is_valid)
+ if site_ids:
+ qparams['site_ids'] = site_ids
+ if summary:
+ qparams['summary'] = summary
+ if self.min_day_obs:
+ qparams['min_day_obs'] = ut.day_obs_int(self.min_day_obs)
+ if self.max_day_obs:
+ qparams['max_day_obs'] = ut.day_obs_int(self.max_day_obs)
+ if self.limit:
+ qparams['limit'] = self.limit
+
+ qstr = urlencode(qparams)
+ url = f'{self.server}/{self.service}/reports?{qstr}'
+ error = None
+ response = None
+ try:
+ response = requests.get(url, timeout=self.timeout)
+ validate_response(response, url)
+ recs = response.json()
+ recs.sort(key=lambda r: r['day_obs'])
+ except Exception as err:
+ recs = []
+ # response is None when the GET itself failed (e.g. timeout).
+ text = response.text if response is not None else ''
+ error = f'{text} Exception={err}'
+
+ self.keep_fields(recs, self.outfields)
+ self.records = recs
+ status = dict(
+ endpoint_url=url,
+ number_of_records=len(recs),
+ error=error,
+ )
+ return status
+
+ def nightly_tickets(self, recs):
+ tickets = defaultdict(set) # tickets[day_obs] = {ticket_url, ...}
+ for r in recs:
+ ticket_url = r['confluence_url']
+ if ticket_url:
+ tickets[r['day_obs']].add(ticket_url)
+ return {dayobs:list(urls) for dayobs,urls in tickets.items()}
+
class NarrativelogAdapter(SourceAdapter):
- """TODO full documentation
- """
service = 'narrativelog'
endpoints = ['messages',]
primary_endpoint = 'messages'
- fields = {'category',
- 'components',
- 'cscs',
- 'date_added',
- 'date_begin',
- 'date_end',
- 'date_invalidated',
- 'id',
- 'is_human',
- 'is_valid',
- 'level',
- 'message_text',
- 'parent_id',
- 'primary_hardware_components',
- 'primary_software_components',
- 'site_id',
- 'subsystems',
- 'systems',
- 'tags',
- 'time_lost',
- 'time_lost_type',
- 'urls',
- 'user_agent',
- 'user_id'}
- filters = {
- 'site_ids',
- 'message_text', # Message text contain ...
- 'min_level', # inclusive
- 'max_level', # exclusive
- 'user_ids',
- 'user_agents',
- 'categories',
- 'exclude_categories',
- 'time_lost_types',
- 'exclude_time_lost_types',
- 'tags', # at least one must be present.
- 'exclude_tags', # all must be absent
- 'systems',
- 'exclude_systems',
- 'subsystems',
- 'exclude_subsystems',
- 'cscs',
- 'exclude_cscs',
- 'components',
- 'exclude_components',
- 'primary_software_components',
- 'exclude_primary_software_components',
- 'primary_hardware_components',
- 'exclude_primary_hardware_components',
- 'urls',
- 'min_time_lost',
- 'max_time_lost',
- 'has_date_begin',
- 'min_date_begin',
- 'max_date_begin',
- 'has_date_end',
- 'min_date_end',
- 'max_date_end',
- 'is_human', # Allowed: either, true, false; Default=either
- 'is_valid', # Allowed: either, true, false; Default=true
- 'min_date_added', # inclusive, TAI ISO string, no TZ
- 'max_date_added', # exclusive, TAI ISO string, no TZ
- 'has_date_invalidated',
- 'min_date_invalidated',
- 'max_date_invalidated',
- 'has_parent_id',
- 'order_by',
- 'offset',
- 'limit'
+ outfields = {
+ # 'category',
+ # 'components',
+ # 'cscs',
+ 'date_added',
+ # 'date_begin',
+ # 'date_end',
+ # 'date_invalidated',
+ # 'id',
+ # 'is_human',
+ # 'is_valid',
+ # 'level',
+ 'message_text',
+ # 'parent_id',
+ # 'primary_hardware_components',
+ # 'primary_software_components',
+ # 'site_id',
+ # 'subsystems',
+ # 'systems',
+ # 'tags',
+ 'time_lost',
+ 'time_lost_type',
+ # 'urls',
+ # 'user_agent',
+ # 'user_id',
}
def get_messages(self,
site_ids=None,
message_text=None,
- min_date_end=None,
- max_date_end=None,
is_human='either',
is_valid='either',
offset=None,
- limit=None,
- outfields=None,
):
- qparams = dict(is_human=is_human, is_valid=is_valid)
+ qparams = dict(
+ is_human=is_human,
+ is_valid=is_valid,
+ order_by='-date_begin',
+ )
if site_ids:
qparams['site_ids'] = site_ids
if message_text:
qparams['message_text'] = message_text
- if min_date_end:
- qparams['min_date_end'] = min_date_end
- if max_date_end:
- qparams['max_date_end'] = max_date_end
- if limit:
- qparams['limit'] = limit
+ if self.min_day_obs:
+ qparams['min_date_added'] = datetime.combine(
+ self.min_date, time()
+ ).isoformat()
+ if self.max_day_obs:
+ qparams['max_date_added'] = datetime.combine(
+ self.max_date, time()
+ ).isoformat()
+ if self.limit:
+ qparams['limit'] = self.limit
qstr = urlencode(qparams)
url = f'{self.server}/{self.service}/messages?{qstr}'
+ error = None
try:
- recs = requests.get(url, timeout=self.timeout).json()
+ r = requests.get(url, timeout=self.timeout)
+ validate_response(r, url)
+ recs = r.json()
recs.sort(key=lambda r: r['date_begin'])
except Exception as err:
- warn(f'No {self.service} records retrieved: {err}')
recs = []
+ error = str(err)
- keep_fields(outfields, recs)
- self.recs = recs
- return self.recs
+ self.keep_fields(recs, self.outfields)
+ self.records = recs
+ status = dict(
+ endpoint_url=url,
+ number_of_records=len(recs),
+ error=error,
+ )
+ return status
+
-    def get_timelost(self, rollup='day'):
+    def get_timelost(self, recs, rollup='day'):
+        def iso_date_begin(rec):
+            return datetime.fromisoformat(rec['date_begin']).date().isoformat()
day_tl = dict() # day_tl[day] = totalDayTimeLost
- for day,dayrecs in itertools.groupby(
- self.recs,
- key=lambda r: datetime.fromisoformat(r['date_begin']).date().isoformat()
- ):
+ for day,dayrecs in itertools.groupby(recs, key=iso_date_begin):
day_tl[day] = sum([r['time_lost'] for r in dayrecs])
return day_tl
+# END: class NarrativelogAdapter
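
A standalone sketch of the rollup get_timelost now performs on caller-supplied records (values made up). Note that itertools.groupby only groups adjacent keys, which is why get_messages sorts by date_begin before this is called.

    import itertools
    from datetime import datetime

    recs = [  # already sorted by date_begin
        {'date_begin': '2024-09-04T01:00:00', 'time_lost': 0.5},
        {'date_begin': '2024-09-04T03:00:00', 'time_lost': 1.0},
        {'date_begin': '2024-09-05T02:00:00', 'time_lost': 2.0},
    ]

    def iso_date_begin(rec):
        return datetime.fromisoformat(rec['date_begin']).date().isoformat()

    day_tl = {day: sum(r['time_lost'] for r in dayrecs)
              for day, dayrecs in itertools.groupby(recs, key=iso_date_begin)}
    print(day_tl)  # {'2024-09-04': 1.5, '2024-09-05': 2.0}
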
-class ExposurelogAdapter(SourceAdapter):
- """TODO full documentation
- EXAMPLES:
- gaps,recs = logrep_utils.ExposurelogAdapter(server_url='https://usdf-rsp-dev.slac.stanford.edu').get_observation_gaps('LSSTComCam')
-    gaps,recs = logrep_utils.ExposurelogAdapter(server_url='https://tucson-teststand.lsst.codes').get_observation_gaps('LSSTComCam')
- """
+class ExposurelogAdapter(SourceAdapter):
ignore_fields = ['id']
service = 'exposurelog'
endpoints = [
@@ -280,59 +390,46 @@ class ExposurelogAdapter(SourceAdapter):
'messages',
]
primary_endpoint = 'messages'
- fields = {'date_added',
- 'date_invalidated',
- 'day_obs',
- 'exposure_flag',
- 'id',
- 'instrument',
- 'is_human',
- 'is_valid',
- 'level',
- 'message_text',
- 'obs_id',
- 'parent_id',
- 'seq_num',
- 'site_id',
- 'tags',
- 'urls',
- 'user_agent',
- 'user_id'}
- filters = {
- 'site_ids',
+ outfields = {
+ 'date_added',
+ # 'date_invalidated',
+ 'day_obs',
+ # 'exposure_flag',
+ # 'id',
+ 'instrument',
+ # 'is_human',
+ # 'is_valid',
+ # 'level',
+ 'message_text',
'obs_id',
- 'instruments',
- 'min_day_obs', # inclusive, integer in form YYYMMDD
- 'max_day_obs', # exclusive, integer in form YYYMMDD
- 'min_seq_num',
- 'max_seq_num',
- 'message_text', # Message text contain ...
- 'min_level', # inclusive
- 'max_level', # exclusive
- 'tags', # at least one must be present.
- 'urls',
- 'exclude_tags', # all must be absent
- 'user_ids',
- 'user_agents',
- 'is_human', # Allowed: either, true, false; Default=either
- 'is_valid', # Allowed: either, true, false; Default=true
- 'exposure_flags',
- 'min_date_added', # inclusive, TAI ISO string, no TZ
- 'max_date_added', # exclusive, TAI ISO string, no TZ
- 'has_date_invalidated',
- 'min_date_invalidated',
- 'max_date_invalidated',
- 'has_parent_id',
- 'order_by',
- 'offset',
- 'limit'
- }
+ # 'parent_id',
+ # 'seq_num',
+ # 'site_id',
+ # 'tags',
+ # 'urls',
+ # 'user_agent',
+ # 'user_id',
+ }
+
+ @property
+ def row_header(self):
+ return('| Time | OBS ID | Telescope | Message |\n'
+ '|------|--------|-----------|---------|'
+ )
+ def row_str_func(self, datetime_str, rec):
+ return(f"> {datetime_str} "
+ f"| {rec['obs_id']} "
+ f"| {rec['instrument']} "
+ f"| {rec['message_text']}
"
+ )
- def check_endpoints(self, timeout=None):
+
+ def check_endpoints(self, timeout=None, verbose=True):
to = (timeout or self.timeout)
- print(f'Try connect to each endpoint of {self.server}/{self.service} '
- f'using timeout={to}.')
+ if verbose:
+            print(f'Trying to connect to each endpoint of '
+                  f'{self.server}/{self.service} ...')
url_http_status_code = dict()
for ep in self.endpoints:
@@ -340,30 +437,34 @@ def check_endpoints(self, timeout=None):
url = f'{self.server}/{self.service}/{ep}{qstr}'
try:
r = requests.get(url, timeout=to)
- except:
- url_http_status_code[url] = 'timeout'
+ validate_response(r, url)
+ except Exception as err:
+ url_http_status_code[url] = 'GET error'
else:
url_http_status_code[url] = r.status_code
- return url_http_status_code
-
+ return url_http_status_code, all([v==200 for v in url_http_status_code.values()])
def get_instruments(self):
url = f'{self.server}/{self.service}/instruments'
try:
- instruments = requests.get(url, timeout=self.timeout).json()
+            r = requests.get(url, timeout=self.timeout)
+ validate_response(r, url)
+ instruments = r.json()
except Exception as err:
- warn(f'No instruments retrieved: {err}')
instruments = dict(dummy=[])
# Flatten the lists
return list(itertools.chain.from_iterable(instruments.values()))
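
The final line flattens the service's dict-of-lists into one instrument list; a standalone illustration (the dict keys here are made up):

    import itertools

    instruments = {'registry_1': ['LSSTComCam', 'LATISS'], 'registry_2': []}
    print(list(itertools.chain.from_iterable(instruments.values())))
    # ['LSSTComCam', 'LATISS']
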
def get_exposures(self, instrument, registry=1):
qparams = dict(instrument=instrument, registry=registry)
+ if self.min_day_obs:
+ qparams['min_day_obs'] = ut.day_obs_int(self.min_day_obs)
+ if self.max_day_obs:
+ qparams['max_day_obs'] = ut.day_obs_int(self.max_day_obs)
url = f'{self.server}/{self.service}/exposures?{urlencode(qparams)}'
try:
recs = requests.get(url, timeout=self.timeout).json()
except Exception as err:
- warn(f'No exposures retrieved: {err}')
recs = []
return recs
@@ -372,63 +473,61 @@ def get_messages(self,
obs_ids=None,
instruments=None,
message_text=None,
- min_day_obs=None,
- max_day_obs=None,
is_human='either',
is_valid='either',
exposure_flags=None,
- offset=None,
- limit=None,
- outfields=None,
):
- qparams = dict(is_human=is_human, is_valid=is_valid)
+ qparams = dict(is_human=is_human,
+ is_valid=is_valid,
+ order_by='-date_added',
+ )
if site_ids:
qparams['site_ids'] = site_ids
if obs_ids:
qparams['obs_ids'] = obs_ids
if instruments:
qparams['instruments'] = instruments
- if min_day_obs:
- qparams['min_day_obs'] = min_day_obs
- if max_day_obs:
- qparams['max_day_obs'] = max_day_obs
+ if self.min_day_obs:
+ qparams['min_day_obs'] = ut.day_obs_int(self.min_day_obs)
+ if self.max_day_obs:
+ qparams['max_day_obs'] = ut.day_obs_int(self.max_day_obs)
if exposure_flags:
qparams['exposure_flags'] = exposure_flags
- if offset:
- qparams['offset'] = offset
- if limit:
- qparams['limit'] = limit
+ if self.limit:
+ qparams['limit'] = self.limit
qstr = urlencode(qparams)
url = f'{self.server}/{self.service}/messages?{qstr}'
recs = []
+ error = None
try:
response = requests.get(url, timeout=self.timeout)
+ validate_response(response, url)
recs = response.json()
except Exception as err:
- warnings.warn(f'No {self.service} records retrieved: {err}')
-
- if len(recs) == 0:
- warn(f'No records retrieved from {url}')
+ recs = []
+ error = str(err)
if recs:
recs.sort(key=lambda r: r['day_obs'])
- keep_fields(outfields, recs)
- self.recs = recs
- return self.recs
+ self.keep_fields(recs, self.outfields)
+ self.records = recs
+ status = dict(
+ endpoint_url=url,
+ number_of_records=len(recs),
+ error=error,
+ )
+ return status
+
- def get_observation_gaps(self, instruments=None,
- min_day_obs=None, # YYYYMMDD
- max_day_obs=None, # YYYYMMDD
- ):
+ def get_observation_gaps(self, instruments=None):
if not instruments:
instruments = self.get_instruments()
assert isinstance(instruments,list), \
f'"instruments" must be a list. Got {instruments!r}'
- # inst_day_rollupol[instrument] => dict[day] => exposureGapInMinutes
+ # inst_day_rollup[instrument] => dict[day] => exposureGapInMinutes
inst_day_rollup = defaultdict(dict) # Instrument/Day rollup
-
for instrum in instruments:
recs = self.get_exposures(instrum)
instrum_gaps = dict()
@@ -452,54 +551,14 @@ def get_observation_gaps(self, instruments=None,
end = rec['timespan_end']
instrum_gaps[day] = gaps
- #!roll = dict()
# Rollup gap times by day
for day,tuples in instrum_gaps.items():
- #!roll[day] = sum([t[2] for t in tuples])
inst_day_rollup[instrum][day] = sum([t[2] for t in tuples])
return inst_day_rollup
+# END: class ExposurelogAdapter
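
A standalone sketch of the final per-day rollup: each instrument's gap tuples (assumed here to be (start, end, gap-in-minutes); values made up) are summed into inst_day_rollup[instrument][day].

    instrum_gaps = {
        '2024-09-04': [('02:00', '02:10', 10.0), ('04:00', '04:05', 5.0)],
        '2024-09-05': [('03:00', '03:30', 30.0)],
    }
    day_rollup = {day: sum(t[2] for t in tuples)
                  for day, tuples in instrum_gaps.items()}
    print(day_rollup)  # {'2024-09-04': 15.0, '2024-09-05': 30.0}
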
-
-
-
-class Dashboard: # TODO Complete and move to its own file.
- """Verify that we can get to all the API endpoints and databases we need for
- any of our sources.
- """
-
- envs = dict(
- summit = 'https://summit-lsp.lsst.codes',
- usdf_dev = 'https://usdf-rsp-dev.slac.stanford.edu',
- tucson = 'https://tucson-teststand.lsst.codes',
- # Environments not currently used:
- # rubin_usdf_dev = '',
- # data_lsst_cloud = '',
- # usdf = '',
- # base_data_facility = '',
- # rubin_idf_int = '',
- )
- adapters = [ExposurelogAdapter,
- NarrativelogAdapter,
- # NightReportAdapter, # TODO
- ]
-
- def report(self, timeout=None):
- url_status = dict()
- for env,server in self.envs.items():
- for adapter in self.adapters:
- service = adapter(server_url=server)
- # url_status[endpoint_url] = http_status_code
- url_status.update(service.check_endpoints(timeout=timeout))
- total_cnt = 0
- good_cnt = 0
- good = list()
- print('\nStatus for each endpoint URL:')
- for url,stat in url_status.items():
- print(f'{stat}\t{url}')
- total_cnt += 1
- if stat == 200:
- good_cnt += 1
- good.append(url)
- print(f'\nConnected to {good_cnt} out of {total_cnt} endpoints.')
- return good_cnt, good
+adapters = [ExposurelogAdapter,
+ NarrativelogAdapter,
+ NightReportAdapter,
+ ]
diff --git a/python/lsst/ts/logging_and_reporting/utils.py b/python/lsst/ts/logging_and_reporting/utils.py
index c6801c8..54534a4 100644
--- a/python/lsst/ts/logging_and_reporting/utils.py
+++ b/python/lsst/ts/logging_and_reporting/utils.py
@@ -19,6 +19,60 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import time
+import datetime # datetime, date, time, timedelta
+
+
+# See https://github.com/lsst-sitcom/summit_utils/blob/0b3fd8795c9cca32f30cef0c37625c5d96804b74/python/lsst/summit/utils/efdUtils.py#L633
+# was: datetime_to_dayobs # TODO remove
+def datetime_to_day_obs(dt) -> str:
+ """Convert a datetime object to day_obs.
+ Round to the date of the start of the observing night.
+ Both the input datetime and output dayobs are in the same timezone.
+ Format of dayobs is
+
+ Parameters
+ ----------
+ dt : `datetime.datetime`
+ The date-time.
+
+ Returns
+ -------
+ day_obs : `str`
+        The day_obs, as a string, e.g. 2023-12-25 (YYYY-MM-DD).
+ """
+ dodate = (dt - datetime.timedelta(hours=12)).date()
+ return dodate.strftime('%Y-%m-%d')
+
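
The 12-hour shift is what assigns early-morning timestamps to the previous night's day_obs; for example:

    import datetime

    dt = datetime.datetime(2024, 9, 5, 3, 0)  # 03:00, shortly after midnight
    print((dt - datetime.timedelta(hours=12)).date().strftime('%Y-%m-%d'))
    # 2024-09-04  (the observing night that began on Sep 4)
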
+# day_obs int to day_obs string (YYYY-MM-DD)
+def day_obs_str(day_obs: int) -> str:
+ dos = str(day_obs)
+ return f'{dos[0:4]}-{dos[4:6]}-{dos[6:8]}' # "YYYY-MM-DD"
+
+# day_obs str (YYYY-MM-DD) to day_obs int
+def day_obs_int(day_obs: str) -> int:
+ return int(day_obs.replace('-',''))
+
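
The two helpers are inverses of each other; a quick round-trip check (assuming both are in scope):

    assert day_obs_str(20240904) == '2024-09-04'
    assert day_obs_int('2024-09-04') == 20240904
    assert day_obs_int(day_obs_str(20240904)) == 20240904
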
+# day_obs (str:YYYY-MM-DD or YYYYMMDD) to datetime.
+# Allow TODAY, YESTERDAY, TOMORROW
+# was: dos2dt
+def get_datetime_from_day_obs_str(day_obs):
+ match day_obs.lower():
+ case 'today':
+ date = datetime.datetime.now().date()
+ case 'yesterday':
+ date = datetime.datetime.now().date() - datetime.timedelta(days=1)
+ case 'tomorrow':
+ date = datetime.datetime.now().date() + datetime.timedelta(days=1)
+ case _:
+ no_dash = day_obs.replace('-','')
+ date = datetime.datetime.strptime(no_dash, '%Y%m%d').date()
+ return date
+
+
+
+
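
Usage sketch for the normalizer (assuming it is in scope): dashed and undashed forms both parse, and the keywords are case-insensitive.

    import datetime

    assert get_datetime_from_day_obs_str('2024-09-04') == datetime.date(2024, 9, 4)
    assert get_datetime_from_day_obs_str('20240904') == datetime.date(2024, 9, 4)
    assert get_datetime_from_day_obs_str('TODAY') == datetime.datetime.now().date()
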
def tic():
"""Start timer.
"""
diff --git a/python/lsst/ts/logging_and_reporting/version.py b/python/lsst/ts/logging_and_reporting/version.py
index ee1c147..07fc7b6 100644
--- a/python/lsst/ts/logging_and_reporting/version.py
+++ b/python/lsst/ts/logging_and_reporting/version.py
@@ -1,3 +1,3 @@
# Generated by setuptools_scm
__all__ = ["__version__"]
-__version__ = "0.1.dev28+g968997e.d20240904"
+__version__ = "0.1.dev63+g83a61e0.d20240918"
diff --git a/requirements.txt b/requirements.txt
index c65b6c6..6b7e061 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,6 +2,10 @@ requests
# jupyter-lab --ip=0.0.0.0
jupyterlab # =3.1.17
pandas
-#matplotlib
-#numpy
+matplotlib
+numpy
pre-commit
+astroplan
+astropy
+lsst_efd_client
+#lsst.summit.utils
\ No newline at end of file
diff --git a/times-square.yaml b/times-square.yaml
index 0a976ce..6f8ca8e 100644
--- a/times-square.yaml
+++ b/times-square.yaml
@@ -1,8 +1,6 @@
enabled: true
root: notebooks_tsqr
ignore:
- - efd_*
- logrep_proto_*
- - TEMPLATE_*
description: >
Times Square for project-wide Logging and Reporting.