diff --git a/dragg/aggregator.py b/dragg/aggregator.py index 4125f40..fbec042 100644 --- a/dragg/aggregator.py +++ b/dragg/aggregator.py @@ -21,114 +21,70 @@ from pathos.pools import ProcessPool # Local -from dragg.mpc_calc import MPCCalc, manage_home, manage_home_forecast +from dragg.mpc_calc import MPCCalc, manage_home from dragg.redis_client import RedisClient from dragg.logger import Logger class Aggregator: def __init__(self): - self.thermal_trend = 0 - self.max_daily_temp = 8 - self.max_daily_ghi = 150 - self.min_daily_temp = 5 - self.tracked_reward_price = 0 - self.prev_load = 30 - self.all_rewards = [] self.log = Logger("aggregator") - self.rlagent_log = Logger("rl_agent") self.data_dir = os.path.expanduser(os.environ.get('DATA_DIR','data')) self.outputs_dir = os.path.join('outputs') if not os.path.isdir(self.outputs_dir): os.makedirs(self.outputs_dir) self.config_file = os.path.join(self.data_dir, os.environ.get('CONFIG_FILE', 'config.toml')) self.ts_data_file = os.path.join(self.data_dir, os.environ.get('SOLAR_TEMPERATURE_DATA_FILE', 'nsrdb.csv')) - # self.spp_data_file = os.path.join(self.data_dir, os.environ.get('SPP_DATA_FILE', 'tou_data.xlsx')) + self.spp_data_file = os.path.join(self.data_dir, os.environ.get('SPP_DATA_FILE', 'spp_data.xlsx')) self.required_keys = { - "community": {"total_number_homes": None}, + "community": {"total_number_homes"}, "home": { - "hvac": { - "r_dist": None, - "c_dist": None, - "p_cool_dist": None, - "p_heat_dist": None, - "temp_sp_dist": None, - "temp_deadband_dist": None - }, - "wh": { - "r_dist": None, - "c_dist": None, - "p_dist": None, - "sp_dist": None, - "deadband_dist": None, - "size_dist": None, - "waterdraws": { - "n_big_draw_dist", - "n_small_draw_dist", - "big_draw_size_dist", - "small_draw_size_dist" - } - }, - "battery": { - "max_rate": None, - "capacity": None, - "cap_bounds": None, - "charge_eff": None, - "discharge_eff": None, - "cons_penalty": None - }, - "pv": { - "area": None, - "efficiency": None - }, - "hems": { - "prediction_horizon": None, - "discomfort": None, - "disutility": None - } + "hvac": {"r_dist", "c_dist", "p_cool_dist", "p_heat_dist", "temp_sp_dist", "temp_deadband_dist"}, + "wh": {"r_dist", "c_dist", "p_dist", "sp_dist", "deadband_dist", "size_dist", "waterdraw_file"}, + "battery": {"max_rate", "capacity", "cap_bounds", "charge_eff", "discharge_eff", "cons_penalty"}, + "pv": {"area", "efficiency"}, + "hems": {"prediction_horizon", "discomfort", "disutility"} }, - "simulation": { - "start_datetime": None, - "end_datetime": None, - "random_seed": None, - "load_zone": None, - "check_type": None, - "run_rbo_mpc": None, - "run_rl_agg": None, - "run_rl_simplified": None - } + "simulation": {"start_datetime", "end_datetime", "random_seed", "load_zone", "check_type", "run_rbo_mpc"}, + # "agg": {"action_horizon", "forecast_horizon", "base_price", "max_rp", "subhourly_steps"} + "agg": {"base_price", "subhourly_steps"} } self.timestep = None # Set by redis_set_initial_values self.iteration = None # Set by redis_set_initial_values self.reward_price = None # Set by redis_set_initial_values self.start_hour_index = None # Set by calc_star_hour_index self.agg_load = 0 # Reset after each iteration - self.baseline_data = {} + self.collected_data = {} self.baseline_agg_load_list = [] # Aggregate load at every timestep from the baseline run self.max_agg_load = None # Set after baseline run, the maximum aggregate load over all the timesteps self.max_agg_load_list = [] - self.num_threads = 1 self.start_dt = None # Set by _set_dt 
self.end_dt = None # Set by _set_dt self.hours = None # Set by _set_dt self.dt = None # Set by _set_dt self.num_timesteps = None # Set by _set_dt self.all_homes = None # Set by get_homes - self.queue = Queue() self.redis_client = RedisClient() self.config = self._import_config() self.check_type = self.config['simulation']['check_type'] # One of: 'pv_only', 'base', 'battery_only', 'pv_battery', 'all' - self.ts_data = self._import_ts_data() # Temp: degC, RH: %, Pressure: mbar, GHI: W/m2 - # self.spp_data = self._import_spp_data() # SPP: $/kWh - self.all_data = self.join_data() + self.thermal_trend = None + self.max_daily_temp = None + self.max_daily_ghi = None + self.min_daily_temp = None + self.prev_load = None + self.ts_data = self._import_ts_data() # Temp: degC, RH: %, Pressure: mbar, GHI: W/m2 self._set_dt() - self._build_tou_price() - self.all_data.drop("ts", axis=1) + + self.spp_data = self._import_spp_data() # SPP: $/kWh + self.tou_data = self._build_tou_price() # TOU: $/kWh + self.all_data = self.join_data() self.all_rps = np.zeros(self.num_timesteps) self.all_sps = np.zeros(self.num_timesteps) + self.case = "baseline" + def _import_config(self): if not os.path.exists(self.config_file): self.log.logger.error(f"Configuration file does not exist: {self.config_file}") @@ -143,35 +99,15 @@ def _import_config(self): sys.exit(1) else: for subsystem in self.required_keys.keys(): - req_keys = set(self.required_keys[subsystem].keys()) - given_keys = set(data[subsystem].keys()) + req_keys = set(self.required_keys[subsystem]) + given_keys = set(data[subsystem]) if not req_keys.issubset(given_keys): missing_keys = req_keys - given_keys - self.logger.error(f"Parameters for {subsystem}: {missing_keys} must be specified in the config file.") + self.log.logger.error(f"Parameters for {subsystem}: {missing_keys} must be specified in the config file.") sys.exit(1) - if 'run_rl_agg' in data['simulation'] or 'run_rl_simplified' in data['simulation']: - self._check_rl_config(data) - self.log.logger.info(f"Set the version write out to {data['rl']['version']}") + self.log.logger.info(f"Set the version write out to {data['simulation']['named_version']}") return data - def _check_rl_config(self, data): - if 'run_rl_agg' in data['simulation']: - req_keys = {"parameters": {"learning_rate", "discount_factor", "batch_size", "exploration_rate", "twin_q"}, - "utility": {"rl_agg_action_horizon", "rl_agg_forecast_horizon", "base_price", "action_space", "action_scale", "hourly_steps"}, - } - elif 'run_rl_simplified' in data['simulation']: - req_keys = {"simplified": {"response_rate", "offset"}} - if not 'rl' in data: - self.log.logger.error(f"{missing_keys} must be configured in the config file.") - sys.exit(1) - else: - for subsystem in req_keys.keys(): - rkeys = set(req_keys[subsystem]) - gkeys = set(data['rl'][subsystem]) - if not rkeys.issubset(gkeys): - missing_keys = rkeys - return - def _set_dt(self): """ Convert the start and end datetimes specified in the config file into python datetime @@ -188,7 +124,6 @@ def _set_dt(self): self.hours = int(self.hours.total_seconds() / 3600) self.num_timesteps = int(np.ceil(self.hours * self.dt)) - self.mask = (self.all_data.index >= self.start_dt) & (self.all_data.index < self.end_dt) self.log.logger.info(f"Start: {self.start_dt.isoformat()}; End: {self.end_dt.isoformat()}; Number of hours: {self.hours}") def _import_ts_data(self): @@ -203,7 +138,7 @@ def _import_ts_data(self): sys.exit(1) df = pd.read_csv(self.ts_data_file, skiprows=2) - self.dt = 
int(self.config['rl']['utility']['hourly_steps'][0]) + self.dt = int(self.config['agg']['subhourly_steps']) self.dt_interval = 60 // self.dt reps = [np.ceil(self.dt/2) if val==0 else np.floor(self.dt/2) for val in df.Minute] df = df.loc[np.repeat(df.index.values, reps)] @@ -219,7 +154,8 @@ def _import_ts_data(self): df[["GHI", "OAT"]] = df[["GHI", "OAT"]].astype(int) self.oat = df['OAT'].to_numpy() self.ghi = df['GHI'].to_numpy() - return df.reset_index(drop=True) + df = df.set_index('ts') + return df def _import_spp_data(self): """ @@ -229,9 +165,13 @@ def _import_spp_data(self): Subtracts 1 hour from time to be inline with 23 hour day as required by pandas. :return: pandas.DataFrame, columns: ts, SPP """ + if not self.config['agg']['spp_enabled']: + return + if not os.path.exists(self.spp_data_file): - self.log.logger.error(f"TOU data file does not exist: {self.spp_data_file}") + self.log.logger.error(f"SPP data file does not exist: {self.spp_data_file}") sys.exit(1) + df_all = pd.read_excel(self.spp_data_file, sheet_name=None) k1 = list(df_all.keys())[0] df = df_all[k1] @@ -251,36 +191,46 @@ def _import_spp_data(self): df = df.rename(columns={"Settlement Point Price": "SPP"}) col_order = ["ts", "SPP"] df = df[col_order] - df[["ts"]] = df.loc[:, "ts"].apply(lambda x: datetime.strptime(x, '%m/%d/%Y %H')) - df[["SPP"]] = df.loc[:, "SPP"].apply(lambda x: x / 1000) - return df.reset_index(drop=True) + df["ts"] = pd.to_datetime(df['ts'], format='%m/%d/%Y %H') + df["SPP"] = df['SPP'] / 1000 + df = df.set_index('ts') + return df def _build_tou_price(self): - if self.config['rl']['utility']['tou_enabled'] == True: - sd_times = self.config['rl']['utility']['tou']['shoulder_times'] - pk_times = self.config['rl']['utility']['tou']['peak_times'] - op_price = float(self.config['rl']['utility']['base_price']) - sd_price = float(self.config['rl']['utility']['tou']['shoulder_price']) - pk_price = float(self.config['rl']['utility']['tou']['peak_price']) - self.all_data['tou'] = self.all_data['ts'].apply(lambda x: pk_price if (x.hour <= pk_times[1] and x.hour >= pk_times[0]) else (sd_price if x.hour <= sd_times[1] and x.hour >= sd_times[0] else op_price)) - else: - self.all_data['tou'] = float(self.config['rl']['utility']['base_price']) + df = pd.DataFrame(index=pd.date_range(start=self.start_dt, periods=self.hours, freq='H')) + df['tou'] = float(self.config['agg']['base_price']) + if self.config['agg']['tou_enabled']: + sd_times = [int(i) for i in self.config['agg']['tou']['shoulder_times']] + pk_times = [int(i) for i in self.config['agg']['tou']['peak_times']] + sd_price = float(self.config['agg']['tou']['shoulder_price']) + pk_price = float(self.config['agg']['tou']['peak_price']) + df['tou'] = np.where(df.index.hour.isin(range(sd_times[0],sd_times[1])), sd_price, df['tou']) + df['tou'] = np.where(df.index.hour.isin(range(pk_times[0],pk_times[1])), pk_price, df['tou']) + return df + def join_data(self): """ Join the TOU, GHI, temp data into a single dataframe :return: pandas.DataFrame """ - df = pd.merge(self.ts_data, self.ts_data['ts'], how='outer', on='ts') + if self.config['agg']['spp_enabled']: + df = pd.merge(self.ts_data, self.spp_data, how='outer', left_index=True, right_index=True) + else: + df = pd.merge(self.ts_data, self.tou_data, how='outer', left_index=True, right_index=True) df = df.fillna(method='ffill') - return df.set_index('ts', drop=False) + self.mask = (df.index >= self.start_dt) & (df.index < self.end_dt) + return df
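# --- Editor's sketch (illustrative, not part of the patch) -------------------
# The corrected _build_tou_price() above layers prices base -> shoulder -> peak,
# so each np.where falls back on the running 'tou' column instead of resetting
# it to base_price (which would erase the peak window, since peak hours sit
# inside the shoulder window). A minimal self-contained check assuming the
# template values: base 0.07, shoulder 0.09 over hours 9-20, peak 0.13 over 14-17.
import numpy as np
import pandas as pd

idx = pd.date_range("2015-01-01", periods=24, freq='H')
tou = pd.Series(0.07, index=idx)                          # base_price everywhere
tou = np.where(idx.hour.isin(range(9, 21)), 0.09, tou)    # shoulder layer
tou = np.where(idx.hour.isin(range(14, 18)), 0.13, tou)   # peak layer stays on top
assert tou[15] == 0.13 and tou[10] == 0.09 and tou[3] == 0.07
# -----------------------------------------------------------------------------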
def _check_home_configs(self): base_homes = [e for e in self.all_homes if e['type'] == "base"] pv_battery_homes = [e for e in self.all_homes if e['type'] == "pv_battery"] pv_only_homes = [e for e in self.all_homes if e['type'] == "pv_only"] battery_only_homes = [e for e in self.all_homes if e['type'] == "battery_only"] - if not len(base_homes) == self.config['community']['total_number_homes'] - self.config['community']['homes_battery'] - self.config['community']['homes_pv'] - self.config['community']['homes_pv_battery']: + if not len(base_homes) == (self.config['community']['total_number_homes'] + - self.config['community']['homes_battery'] + - self.config['community']['homes_pv'] + - self.config['community']['homes_pv_battery']): self.log.logger.error("Incorrect number of base homes.") sys.exit(1) elif not len(pv_battery_homes) == self.config['community']['homes_pv_battery']: @@ -311,6 +261,7 @@ def get_homes(self): else: self.create_homes() self._check_home_configs() + self.write_home_configs() def create_homes(self): """ @@ -369,11 +320,6 @@ def create_homes(self): self.config['home']['wh']['r_dist'][1], self.config['community']['total_number_homes'] ) - wh_c_dist = np.random.uniform( - self.config['home']['wh']['c_dist'][0], - self.config['home']['wh']['c_dist'][1], - self.config['community']['total_number_homes'] - ) wh_p_dist = np.random.uniform( self.config['home']['wh']['p_dist'][0], self.config['home']['wh']['p_dist'][1], @@ -409,30 +355,28 @@ def create_homes(self): daily_timesteps = int(24 * self.dt) home_wh_all_draw_size_dist = [] - self.waterdraws_from_csv = True - self.waterdraws_file = os.path.join(self.data_dir, '100_Random_Flow_Profiles.csv') - self.wh_info_file = os.path.join(self.data_dir, 'site_info.csv') - if self.waterdraws_from_csv: - waterdraw_df = pd.read_csv(self.waterdraws_file, index_col=0) - wh_info_df = pd.read_csv(self.wh_info_file, index_col=0) - waterdraw_df.index = pd.to_datetime(waterdraw_df.index, format='%Y-%m-%d %H:%M:%S') - sigma = 0.2 - waterdraw_df = waterdraw_df.applymap(lambda x: x * (1 + sigma * np.random.randn())) - waterdraw_df = waterdraw_df.resample('H').sum() - for j in range(self.config['community']['total_number_homes']): - this_house = waterdraw_df.sample(axis='columns').values - this_house = np.reshape(this_house, (-1, 24)) - this_house = this_house[np.random.choice(this_house.shape[0], ndays)].flatten() - this_house = np.clip(this_house, 0, home_wh_size_dist[j]) #.tolist() - home_wh_all_draw_size_dist.append(this_house.tolist()) + self.waterdraws_file = os.path.join(self.data_dir, self.config['home']['wh']['waterdraw_file']) + + waterdraw_df = pd.read_csv(self.waterdraws_file, index_col=0) + waterdraw_df.index = pd.to_datetime(waterdraw_df.index, format='%Y-%m-%d %H:%M:%S') + sigma = 0.2 + waterdraw_df = waterdraw_df.applymap(lambda x: x * (1 + sigma * np.random.randn())) + waterdraw_df = waterdraw_df.resample('H').sum() + for j in range(self.config['community']['total_number_homes']): + this_house = waterdraw_df.sample(axis='columns').values + this_house = np.reshape(this_house, (-1, 24)) + this_house = this_house[np.random.choice(this_house.shape[0], ndays)].flatten() + this_house = np.clip(this_house, 0, home_wh_size_dist[j]) #.tolist() + home_wh_all_draw_size_dist.append(this_house.tolist()) all_homes = [] responsive_hems = { - "horizon": self.mpc['horizon'], + "horizon": self.config['home']['hems']['prediction_horizon'], "hourly_agg_steps": self.dt, "sub_subhourly_steps": self.config['home']['hems']['sub_subhourly_steps'], - 
"solver": self.config['home']['hems']['solver'] + "solver": self.config['home']['hems']['solver'], + "discount_factor": self.config['home']['hems']['discount_factor'] } if not os.path.isdir(os.path.join('home_logs')): @@ -442,18 +386,24 @@ def create_homes(self): # Define pv and battery homes num_pv_battery_homes = self.config['community']['homes_pv_battery'] for j in range(num_pv_battery_homes): - hems = responsive_hems res = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5)) name = names.get_first_name() + '-' + res battery = { - "max_rate": np.random.uniform(self.config['home']['battery']['max_rate'][0], self.config['home']['battery']['max_rate'][1]), - "capacity": np.random.uniform(self.config['home']['battery']['capacity'][0], self.config['home']['battery']['capacity'][1]), - "capacity_lower": np.random.uniform(self.config['home']['battery']['lower_bound'][0], self.config['home']['battery']['lower_bound'][1]), - "capacity_upper": np.random.uniform(self.config['home']['battery']['upper_bound'][0], self.config['home']['battery']['upper_bound'][1]), - "ch_eff": np.random.uniform(self.config['home']['battery']['charge_eff'][0], self.config['home']['battery']['charge_eff'][1]), - "disch_eff": np.random.uniform(self.config['home']['battery']['discharge_eff'][0], self.config['home']['battery']['discharge_eff'][1]), - "e_batt_init": np.random.uniform(self.config['home']['battery']['lower_bound'][1], self.config['home']['battery']['upper_bound'][0]) + "max_rate": np.random.uniform(self.config['home']['battery']['max_rate'][0], + self.config['home']['battery']['max_rate'][1]), + "capacity": np.random.uniform(self.config['home']['battery']['capacity'][0], + self.config['home']['battery']['capacity'][1]), + "capacity_lower": np.random.uniform(self.config['home']['battery']['lower_bound'][0], + self.config['home']['battery']['lower_bound'][1]), + "capacity_upper": np.random.uniform(self.config['home']['battery']['upper_bound'][0], + self.config['home']['battery']['upper_bound'][1]), + "ch_eff": np.random.uniform(self.config['home']['battery']['charge_eff'][0], + self.config['home']['battery']['charge_eff'][1]), + "disch_eff": np.random.uniform(self.config['home']['battery']['discharge_eff'][0], + self.config['home']['battery']['discharge_eff'][1]), + "e_batt_init": np.random.uniform(self.config['home']['battery']['lower_bound'][1], + self.config['home']['battery']['upper_bound'][0]) } pv = { @@ -478,7 +428,6 @@ def create_homes(self): }, "wh": { "r": wh_r_dist[i], - "c": wh_c_dist[i], "p": wh_p_dist[i], "temp_wh_min": home_wh_temp_min_dist[i], "temp_wh_max": home_wh_temp_max_dist[i], @@ -487,7 +436,7 @@ def create_homes(self): "tank_size": home_wh_size_dist[i], "draw_sizes": home_wh_all_draw_size_dist[i], }, - "hems": hems, + "hems": responsive_hems, "battery": battery, "pv": pv }) @@ -522,7 +471,6 @@ def create_homes(self): }, "wh": { "r": wh_r_dist[i], - "c": wh_c_dist[i], "p": wh_p_dist[i], "temp_wh_min": home_wh_temp_min_dist[i], "temp_wh_max": home_wh_temp_max_dist[i], @@ -531,7 +479,7 @@ def create_homes(self): "tank_size": home_wh_size_dist[i], "draw_sizes": home_wh_all_draw_size_dist[i], }, - "hems": hems, + "hems": responsive_hems, "pv": pv }) i += 1 @@ -544,13 +492,20 @@ def create_homes(self): name = names.get_first_name() + '-' + res battery = { - "max_rate": np.random.uniform(self.config['home']['battery']['max_rate'][0], self.config['home']['battery']['max_rate'][1]), - "capacity": np.random.uniform(self.config['home']['battery']['capacity'][0], 
self.config['home']['battery']['capacity'][1]), - "capacity_lower": np.random.uniform(self.config['home']['battery']['lower_bound'][0], self.config['home']['battery']['lower_bound'][1]), - "capacity_upper": np.random.uniform(self.config['home']['battery']['upper_bound'][0], self.config['home']['battery']['upper_bound'][1]), - "ch_eff": np.random.uniform(self.config['home']['battery']['charge_eff'][0], self.config['home']['battery']['charge_eff'][1]), - "disch_eff": np.random.uniform(self.config['home']['battery']['discharge_eff'][0], self.config['home']['battery']['discharge_eff'][1]), - "e_batt_init": np.random.uniform(self.config['home']['battery']['lower_bound'][1], self.config['home']['battery']['upper_bound'][0]) + "max_rate": np.random.uniform(self.config['home']['battery']['max_rate'][0], + self.config['home']['battery']['max_rate'][1]), + "capacity": np.random.uniform(self.config['home']['battery']['capacity'][0], + self.config['home']['battery']['capacity'][1]), + "capacity_lower": np.random.uniform(self.config['home']['battery']['lower_bound'][0], + self.config['home']['battery']['lower_bound'][1]), + "capacity_upper": np.random.uniform(self.config['home']['battery']['upper_bound'][0], + self.config['home']['battery']['upper_bound'][1]), + "ch_eff": np.random.uniform(self.config['home']['battery']['charge_eff'][0], + self.config['home']['battery']['charge_eff'][1]), + "disch_eff": np.random.uniform(self.config['home']['battery']['discharge_eff'][0], + self.config['home']['battery']['discharge_eff'][1]), + "e_batt_init": np.random.uniform(self.config['home']['battery']['lower_bound'][1], + self.config['home']['battery']['upper_bound'][0]) } all_homes.append({ @@ -568,7 +523,6 @@ def create_homes(self): }, "wh": { "r": wh_r_dist[i], - "c": wh_c_dist[i], "p": wh_p_dist[i], "temp_wh_min": home_wh_temp_min_dist[i], "temp_wh_max": home_wh_temp_max_dist[i], @@ -577,7 +531,7 @@ def create_homes(self): "tank_size": home_wh_size_dist[i], "draw_sizes": home_wh_all_draw_size_dist[i], }, - "hems": hems, + "hems": responsive_hems, "battery": battery }) i += 1 @@ -604,7 +558,6 @@ def create_homes(self): }, "wh": { "r": wh_r_dist[i], - "c": wh_c_dist[i], "p": wh_p_dist[i], "temp_wh_min": home_wh_temp_min_dist[i], "temp_wh_max": home_wh_temp_max_dist[i], @@ -613,24 +566,23 @@ def create_homes(self): "tank_size": home_wh_size_dist[i], "draw_sizes": home_wh_all_draw_size_dist[i], }, - "hems": hems + "hems": responsive_hems }) i += 1 self.all_homes = all_homes - self.write_home_configs() self.all_homes_obj = [] self.max_poss_load = 0 self.min_poss_load = 0 for home in all_homes: - obj = MPCCalc(home) - self.all_homes_obj += [obj] - self.max_poss_load += obj.max_load + home_obj = MPCCalc(home) + self.all_homes_obj += [home_obj] + self.max_poss_load += home_obj.max_load - def reset_baseline_data(self): + def reset_collected_data(self): self.baseline_agg_load_list = [] for home in self.all_homes: - self.baseline_data[home["name"]] = { + self.collected_data[home["name"]] = { "type": home["type"], "temp_in_sp": home["hvac"]["temp_in_sp"], "temp_wh_sp": home["wh"]["temp_wh_sp"], @@ -647,12 +599,12 @@ def reset_baseline_data(self): "correct_solve": [] } if 'pv' in home["type"]: - self.baseline_data[home["name"]]["p_pv_opt"] = [] - self.baseline_data[home["name"]]["u_pv_curt_opt"] = [] + self.collected_data[home["name"]]["p_pv_opt"] = [] + self.collected_data[home["name"]]["u_pv_curt_opt"] = [] if 'battery' in home["type"]: - self.baseline_data[home["name"]]["e_batt_opt"] = [home["battery"]["e_batt_init"]] 
- self.baseline_data[home["name"]]["p_batt_ch"] = [] - self.baseline_data[home["name"]]["p_batt_disch"] = [] + self.collected_data[home["name"]]["e_batt_opt"] = [home["battery"]["e_batt_init"]] + self.collected_data[home["name"]]["p_batt_ch"] = [] + self.collected_data[home["name"]]["p_batt_disch"] = [] def check_all_data_indices(self): """ @@ -663,7 +615,7 @@ def check_all_data_indices(self): if not self.start_dt >= self.all_data.index[0]: self.log.logger.error("The start datetime must exist in the data provided.") sys.exit(1) - if not self.end_dt + timedelta(hours=max(self.config['home']['hems']['prediction_horizon'])) <= self.all_data.index[-1]: + if not self.end_dt + timedelta(hours=self.config['home']['hems']['prediction_horizon']) <= self.all_data.index[-1]: self.log.logger.error("The end datetime + the largest prediction horizon must exist in the data provided.") sys.exit(1) @@ -684,15 +636,11 @@ def redis_set_initial_values(self): """ self.timestep = 0 - # self.e_batt_init = self.config['home']['battery']['capacity'] * self.config['home']['battery']['cap_bounds'][0] - # self.redis_client.conn.hset("initial_values", "e_batt_init", self.e_batt_init) self.redis_client.conn.set("start_hour_index", self.start_hour_index) self.redis_client.conn.hset("current_values", "timestep", self.timestep) - if self.case == "rl_agg" or self.case == "simplified": - self.reward_price = np.zeros(self.util['rl_agg_horizon'] * self.dt) - for val in self.reward_price.tolist(): - self.redis_client.conn.rpush("reward_price", val) + self.reward_price = np.zeros(self.config['agg']['rl']['action_horizon'] * self.dt) + self.redis_client.conn.rpush("reward_price", *self.reward_price.tolist()) def redis_add_all_data(self): """ @@ -711,33 +659,26 @@ def redis_set_current_values(self): :return: None """ self.redis_client.conn.hset("current_values", "timestep", self.timestep) - self.redis_client.conn.hset("current_values", "tracked_rp", self.tracked_reward_price) - if self.case == "rl_agg" or self.case == "simplified": - self.all_sps[self.timestep-1] = self.agg_setpoint - self.all_rps[self.timestep-1] = self.reward_price[0] - for i in range(len(self.reward_price)): - self.redis_client.conn.lpop("reward_price") - self.redis_client.conn.rpush("reward_price", self.reward_price[i]) + if 'rl' in self.case: + self.all_sps[self.timestep] = self.agg_setpoint + self.all_rps[self.timestep] = self.reward_price[0] + self.redis_client.conn.delete("reward_price") + self.redis_client.conn.rpush("reward_price", *self.reward_price) - def _gen_forecast(self): - """ - Forecasts the anticipated energy consumption at each timestep through - the MPCCalc class. Uses the predetermined reward price signal + a reward - price of 0 for any unforecasted reward price signals. - :return: list of type float - """ - results = self._threaded_forecast() - return np.sum(results, axis=0) - - def _gen_setpoint(self, time): + def gen_setpoint(self): """ Generates the setpoint of the RL utility. Dynamically sized for the number of houses in the community. 
:return: float """ - self.tracked_loads[:-1] = self.tracked_loads[1:] - self.tracked_loads[-1] = self.agg_load + if self.timestep < 2: + self.tracked_loads = [0.5 * self.max_poss_load] * self.config['agg']['rl']['prev_timesteps'] + self.max_load = -float("inf") + self.min_load = float("inf") + else: + self.tracked_loads[:-1] = self.tracked_loads[1:] + self.tracked_loads[-1] = self.agg_load self.avg_load = np.average(self.tracked_loads) if self.agg_load > self.max_load or self.timestep % 24 == 0: self.max_load = self.agg_load @@ -747,7 +688,7 @@ def _gen_setpoint(self, time): return sp def check_baseline_vals(self): - for home, vals in self.baseline_data.items(): + for home, vals in self.collected_data.items(): if self.check_type == 'all': homes_to_check = self.all_homes else: @@ -765,15 +706,15 @@ def run_iteration(self): from all homes in the community. Threaded, using pathos :return: None """ - pool = ProcessPool(nodes=self.config['simulation']['n_nodes']) # open a pool of nodes - results = pool.map(manage_home, self.as_list) - self.thermal_trend = self.oat[self.timestep + 4] - self.oat[self.timestep] day_of_year = self.timestep // (self.dt * 24) self.max_daily_temp = max(self.oat[day_of_year*(self.dt*24):(day_of_year+1)*(self.dt*24)]) self.min_daily_temp = min(self.oat[day_of_year*(self.dt*24):(day_of_year+1)*(self.dt*24)]) self.max_daily_ghi = max(self.ghi[day_of_year*(self.dt*24):(day_of_year+1)*(self.dt*24)]) + pool = ProcessPool(nodes=self.config['simulation']['n_nodes']) # open a pool of nodes + results = pool.map(manage_home, self.as_list) + self.timestep += 1 def collect_data(self): @@ -795,7 +736,7 @@ def collect_data(self): if 'battery' in home["type"]: opt_keys += ['p_batt_ch', 'p_batt_disch', 'e_batt_opt'] if k in opt_keys: - self.baseline_data[home["name"]][k].append(float(v)) + self.collected_data[home["name"]][k].append(float(v)) self.house_load.append(float(vals["p_grid_opt"])) self.forecast_house_load.append(float(vals["forecast_p_grid_opt"])) agg_cost += float(vals["cost_opt"]) @@ -803,8 +744,7 @@ def collect_data(self): self.forecast_load = np.sum(self.forecast_house_load) self.agg_cost = agg_cost self.baseline_agg_load_list.append(self.agg_load) - print("collected", self.agg_load) - self.agg_setpoint = self._gen_setpoint(self.timestep) + self.agg_setpoint = self.gen_setpoint() def run_baseline(self): """ @@ -813,7 +753,7 @@ def run_baseline(self): (For no MPC in HEMS specify the MPC prediction horizon as 0.) :return: None """ - self.log.logger.info(f"Performing baseline run for horizon: {self.mpc['horizon']}") + self.log.logger.info(f"Performing baseline run for horizon: {self.config['home']['hems']['prediction_horizon']}") self.start_time = datetime.now() self.as_list = [] @@ -829,6 +769,9 @@ def run_baseline(self): self.log.logger.info("Creating a checkpoint file.") self.write_outputs() + def my_summary(self): + return + def summarize_baseline(self): """ Get the maximum of the aggregate demand for each simulation. 
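# --- Editor's sketch (illustrative, not part of the patch) -------------------
# gen_setpoint() above keeps a fixed-length window of recent aggregate loads,
# shifting it one slot per timestep and averaging it to form the utility
# setpoint. A minimal sketch of that shift-and-append pattern; prev_timesteps=4,
# max_poss_load=100.0, and the per-step loads are hypothetical values.
import numpy as np

tracked = np.full(4, 0.5 * 100.0)      # seeded at half the max possible load
for agg_load in [42.0, 57.0, 61.0]:    # hypothetical aggregate load each step
    tracked[:-1] = tracked[1:]         # drop the oldest sample
    tracked[-1] = agg_load             # record the newest aggregate load
avg_load = np.average(tracked)         # basis for the broadcast setpoint
# -----------------------------------------------------------------------------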
@@ -836,31 +779,48 @@ """ self.end_time = datetime.now() self.t_diff = self.end_time - self.start_time - self.log.logger.info(f"Horizon: {self.mpc['horizon']}; Num Hours Simulated: {self.hours}; Run time: {self.t_diff.total_seconds()} seconds") + self.log.logger.info(f"Horizon: {self.config['home']['hems']['prediction_horizon']}; Num Hours Simulated: {self.hours}; Run time: {self.t_diff.total_seconds()} seconds") self.max_agg_load = max(self.baseline_agg_load_list) self.max_agg_load_list.append(self.max_agg_load) - # self.log.logger.info(f"Max load list: {self.max_agg_load_list}") - self.baseline_data["Summary"] = { + self.collected_data["Summary"] = { "case": self.case, "start_datetime": self.start_dt.strftime('%Y-%m-%d %H'), "end_datetime": self.end_dt.strftime('%Y-%m-%d %H'), "solve_time": self.t_diff.total_seconds(), - "horizon": self.mpc['horizon'], + "horizon": self.config['home']['hems']['prediction_horizon'], "num_homes": self.config['community']['total_number_homes'], "p_max_aggregate": self.max_agg_load, "p_grid_aggregate": self.baseline_agg_load_list, - # "SPP": self.all_data.loc[self.mask, "SPP"].values.tolist(), "OAT": self.all_data.loc[self.mask, "OAT"].values.tolist(), "GHI": self.all_data.loc[self.mask, "GHI"].values.tolist(), - "TOU": self.all_data.loc[self.mask, "tou"].values.tolist(), "RP": self.all_rps.tolist(), "p_grid_setpoint": self.all_sps.tolist(), - "rl_rewards": self.all_rewards + # "rl_rewards": self.all_rewards } - def write_outputs(self, inc_rl_agents=True): + self.my_summary() + + if self.config['agg']['spp_enabled']: + self.collected_data["Summary"]["SPP"] = self.all_data.loc[self.mask, "SPP"].values.tolist() + else: + self.collected_data["Summary"]["TOU"] = self.all_data.loc[self.mask, "tou"].values.tolist() + + def set_run_dir(self): + """ + Sets the run directory based on the start/end datetime, community and home configs, + and the named version. + :return: None + """ + date_output = os.path.join(self.outputs_dir, f"{self.start_dt.strftime('%Y-%m-%dT%H')}_{self.end_dt.strftime('%Y-%m-%dT%H')}") + mpc_output = os.path.join(date_output, f"{self.check_type}-homes_{self.config['community']['total_number_homes']}-horizon_{self.config['home']['hems']['prediction_horizon']}-interval_{self.dt_interval}-{self.dt_interval // self.config['home']['hems']['sub_subhourly_steps']}-solver_{self.config['home']['hems']['solver']}") + + self.run_dir = os.path.join(mpc_output, f"version-{self.version}") + if not os.path.isdir(self.run_dir): + os.makedirs(self.run_dir) + + def write_outputs(self): """ Writes values for simulation run to a json file for later reference. Is called at the end of the simulation run period and optionally at a checkpoint period.
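# --- Editor's sketch (illustrative, not part of the patch) -------------------
# set_run_dir() above composes the output path from run metadata. With the
# config-template values (check_type "all", 10 homes, horizon 6, agg
# subhourly_steps 1 so dt_interval is 60, sub_subhourly_steps 6, solver
# GLPK_MI, named_version "test"), the results land in roughly:
import os

run_dir = os.path.join(
    "outputs",
    "2015-01-01T00_2015-01-04T00",                            # start_end window
    "all-homes_10-horizon_6-interval_60-10-solver_GLPK_MI",   # 10 = 60 // 6
    "version-test")
results_file = os.path.join(run_dir, "baseline", "results.json")  # written by write_outputs()
# -----------------------------------------------------------------------------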
@@ -868,42 +828,12 @@ def write_outputs(self, inc_rl_agents=True): """ self.summarize_baseline() - date_output = os.path.join(self.outputs_dir, f"{self.start_dt.strftime('%Y-%m-%dT%H')}_{self.end_dt.strftime('%Y-%m-%dT%H')}_{self.dt_interval}-{self.dt_interval // self.config['home']['hems']['sub_subhourly_steps'][0]}") - if not os.path.isdir(date_output): - os.makedirs(date_output) - - mpc_output = os.path.join(date_output, f"{self.check_type}-homes_{self.config['community']['total_number_homes']}-horizon_{self.mpc['horizon']}-interval_{self.dt_interval // self.config['home']['hems']['sub_subhourly_steps'][0]}") - if not os.path.isdir(mpc_output): - os.makedirs(mpc_output) - - agg_output = os.path.join(mpc_output, f"{self.case}") - if not os.path.isdir(agg_output): - os.makedirs(agg_output) - - if self.case == "baseline": - run_name = f"{self.case}_version-{self.version}-results.json" - file = os.path.join(agg_output, run_name) - - else: - run_name = f"agg_horizon_{self.util['rl_agg_horizon']}-interval_{self.dt_interval}-alpha_{self.rl_params['alpha']}-epsilon_{self.rl_params['epsilon']}-beta_{self.rl_params['beta']}_batch-{self.rl_params['batch_size']}_version-{self.version}" - run_dir = os.path.join(agg_output, run_name) - if not os.path.isdir(run_dir): - os.makedirs(run_dir) - if inc_rl_agents: - q_data = {} - for agent in self.rl_agents: - q_data[agent.name] = agent.rl_data - q_file = os.path.join(agg_output, run_name, "q-results.json") - with open(q_file, 'w+') as f: - json.dump(q_data, f, indent=4) - rewards_data_file = os.path.join(agg_output, run_name, "rewards.json") - - file = os.path.join(agg_output, run_name, "results.json") - + case_dir = os.path.join(self.run_dir, self.case) + if not os.path.isdir(case_dir): + os.makedirs(case_dir) + file = os.path.join(case_dir, "results.json") with open(file, 'w+') as f: - json.dump(self.baseline_data, f, indent=4) - with open(rewards_data_file, 'w+') as f: - json.dump(self.all_rewards, f, indent=4) + json.dump(self.collected_data, f, indent=4) def write_home_configs(self): """ @@ -932,10 +862,8 @@ def set_agg_mpc_initial_vals(self): def set_dummy_rl_parameters(self): self.tracked_loads = self.config['community']['house_p_avg']*self.config['community']['total_number_homes']*np.ones(12) - self.mpc = self.mpc_permutations[0] - self.util = self.util_permutations[0] - self.rl_params = self.rl_permutations[0] - self.version = self.versions[0] + # self.util = self.util_permutations[0] + # self.rl_params = self.rl_permutations[0] def setup_rl_agg_run(self): self.flush_redis() @@ -945,63 +873,28 @@ def setup_rl_agg_run(self): if self.check_type == "all" or home["type"] == self.check_type: self.as_list += [home] - self.log.logger.info(f"Performing RL AGG (agg. horizon: {self.util['rl_agg_horizon']}, learning rate: {self.rl_params['alpha']}, discount factor: {self.rl_params['beta']}, exploration rate: {self.rl_params['epsilon']}) with MPC HEMS for horizon: {self.mpc['horizon']}") + # self.log.logger.info(f"Performing RL AGG (agg. 
horizon: {self.util['rl_agg_horizon']}, learning rate: {self.rl_params['alpha']}, discount factor: {self.rl_params['beta']}, exploration rate: {self.rl_params['epsilon']}) with MPC HEMS for horizon: {self.config['home']['hems']['prediction_horizon']}") self.start_time = datetime.now() - self.actionspace = self.config['rl']['utility']['action_space'] self.baseline_agg_load_list = [0] self.all_rewards = [] self.forecast_load = [3*len(self.all_homes_obj)] self.prev_forecast_load = self.forecast_load - self.forecast_setpoint = self._gen_setpoint(self.timestep) + self.forecast_setpoint = self.gen_setpoint() self.agg_load = self.forecast_load[0] # approximate load for initial timestep - self.agg_setpoint = self._gen_setpoint(self.timestep) + self.agg_setpoint = self.gen_setpoint() self.redis_set_current_values() - # def run_rl_agg(self): - # """ - # Runs simulation with the RL aggregator agent(s) to determine the reward - # price signal. - # :return: None - # """ - # self.setup_rl_agg_run() - # - # self.num_agents = 2 - # self.rl_agents = [HorizonAgent(self.rl_params, self.rlagent_log)] - # for i in range(self.num_agents-1): - # self.rl_agents += [NextTSAgent(self.rl_params, self.rlagent_log, i)] # nexttsagent has a smaller actionspace and a correspondingly smaller exploration rate - # - # for t in range(self.num_timesteps): - # self.agg_setpoint = self._gen_setpoint(self.timestep // self.dt) - # self.prev_forecast_load = self.forecast_load - # self.forecast_setpoint = self._gen_setpoint(self.timestep + 1) - # - # self.run_iteration() - # self.collect_data() - # - # self.forecast_load = self._gen_forecast() - # self.reward_price[0] = self.rl_agents[-1].train(self) - # - # self.redis_set_current_values() # broadcast rl price to community - # - # if t > 0 and t % (self.checkpoint_interval) == 0: # weekly checkpoint - # self.log.logger.info("Creating a checkpoint file.") - # self.write_outputs() - # - # self.end_time = datetime.now() - # self.t_diff = self.end_time - self.start_time - # self.log.logger.info(f"Horizon: {self.mpc['horizon']}; Num Hours Simulated: {self.hours}; Run time: {self.t_diff.total_seconds()} seconds") - def test_response(self): """ Tests the RL agent using a linear model of the community's response to changes in the reward price. :return: None """ - c = self.config['rl']['simplified']['response_rate'] - k = self.config['rl']['simplified']['offset'] + c = self.config['agg']['simplified']['response_rate'] + k = self.config['agg']['simplified']['offset'] if self.timestep == 0: self.agg_load = self.agg_setpoint + 0.1*self.agg_setpoint self.agg_load = self.agg_load - c * self.reward_price[0] * (self.agg_setpoint - self.agg_load) @@ -1009,49 +902,10 @@ def test_response(self): self.log.logger.info(f"Iteration {self.timestep} finished. Aggregate load {self.agg_load}") self.timestep += 1 - def run_rl_agg_simplified(self): - """ - Runs a simplified community response to the reward price signal. Used to - validate the RL agent model. - :return: None - """ - self.log.logger.info(f"Performing RL AGG (agg. 
horizon: {self.util['rl_agg_horizon']}, learning rate: {self.rl_params['alpha']}, discount factor: {self.rl_params['beta']}, exploration rate: {self.rl_params['epsilon']}) with simplified community model.") - self.start_time = datetime.now() - - self.actionspace = self.config['rl']['utility']['action_space'] - self.baseline_agg_load_list = [0] - - self.forecast_setpoint = self._gen_setpoint(self.timestep) - self.forecast_load = [self.forecast_setpoint] - self.prev_forecast_load = self.forecast_load - - self.agg_load = self.forecast_load[0] # approximate load for initial timestep - self.agg_setpoint = self._gen_setpoint(self.timestep) - - horizon_agent = HorizonAgent(self.rl_params, self.rlagent_log) - self.rl_agents = [horizon_agent] - - for t in range(self.num_timesteps): - self.agg_setpoint = self._gen_setpoint(self.timestep // self.dt) - self.prev_forecast_load = self.forecast_load - self.forecast_load = [self.agg_load] # forecast current load at next timestep - self.forecast_setpoint = self._gen_setpoint(self.timestep + 1) - - self.redis_set_current_values() # broadcast rl price to community - self.test_response() - self.baseline_agg_load_list.append(self.agg_load) - - self.reward_price[:-1] = self.reward_price[1:] - self.reward_price[-1] = horizon_agent.train(self) / self.config['rl']['utility']['action_scale'] - - self.end_time = datetime.now() - self.t_diff = self.end_time - self.start_time - self.log.logger.info(f"Num Hours Simulated: {self.hours}; Run time: {self.t_diff.total_seconds()} seconds") - def flush_redis(self): """ Cleans all information stored in the Redis server. (Including environmental - data.) + and home data.) :return: None """ self.redis_client.conn.flushall() @@ -1062,25 +916,19 @@ def flush_redis(self): self.redis_add_all_data() self.redis_set_initial_values() - def set_value_permutations(self): - mpc_parameters = {"horizon": [int(i) for i in self.config['home']['hems']['prediction_horizon']]} - keys, values = zip(*mpc_parameters.items()) - self.mpc_permutations = [dict(zip(keys, v)) for v in it.product(*values)] - - util_parameters = {"rl_agg_horizon": [int(i) for i in self.config['rl']['utility']['rl_agg_action_horizon']]} - keys, values = zip(*util_parameters.items()) - self.util_permutations = [dict(zip(keys, v)) for v in it.product(*values)] - - rl_parameters = {"alpha": [float(i) for i in self.config['rl']['parameters']['learning_rate']], - "beta": [float(i) for i in self.config['rl']['parameters']['discount_factor']], - "epsilon": [float(i) for i in self.config['rl']['parameters']['exploration_rate']], - "batch_size": [int(i) for i in self.config['rl']['parameters']['batch_size']], - "twin_q": [self.config['rl']['parameters']['twin_q']] - } - keys, values = zip(*rl_parameters.items()) - self.rl_permutations = [dict(zip(keys, v)) for v in it.product(*values)] - - self.versions = self.config['rl']['version'] + # def set_value_permutations(self): + # """ + # --CURRENTLY DEPRECATED TO INTERFACE WITH DRAGGEnv-- + # Uses the list of config parameters to create permutations of all parameters listed + # for use in batch runs.
+ # """ + # mpc_parameters = {"horizon": [int(i) for i in self.config['home']['hems'][]]} + # keys, values = zip(*mpc_parameters.items()) + # self.mpc_permutations = [dict(zip(keys, v)) for v in it.product(*values)] + # + # util_parameters = {"rl_agg_horizon": [int(i) for i in self.config['agg'][]]]} + # keys, values = zip(*util_parameters.items()) + # self.util_permutations = [dict(zip(keys, v)) for v in it.product(*values)] def run(self): """ @@ -1098,42 +946,17 @@ def run(self): elif self.config['simulation']['checkpoint_interval'] == "weekly": self.checkpoint_interval = self.dt * 24 * 7 - self.set_value_permutations() + self.version = self.config['simulation']['named_version'] + self.set_run_dir() if self.config['simulation']['run_rbo_mpc']: # Run baseline MPC with N hour horizon, no aggregator # Run baseline with 1 hour horizon for non-MPC HEMS - self.case = "baseline" # no aggregator - for self.mpc in self.mpc_permutations: - for self.version in self.versions: - self.flush_redis() - self.get_homes() - self.reset_baseline_data() - self.run_baseline() - self.write_outputs() - - if self.config['simulation']['run_rl_agg']: - self.case = "rl_agg" - - for self.mpc in self.mpc_permutations: - for self.util in self.util_permutations: - for self.rl_params in self.rl_permutations: - for self.version in self.versions: - self.flush_redis() - self.get_homes() - self.reset_baseline_data() - self.run_rl_agg() - self.write_outputs() - - if self.config['simulation']['run_rl_simplified']: - self.case = "simplified" - self.all_homes = [] - - for self.mpc in self.mpc_permutations: - for self.util in self.util_permutations: - for self.rl_params in self.rl_permutations: - for self.version in self.versions: - self.flush_redis() - self.reset_baseline_data() - self.run_rl_agg_simplified() - self.write_outputs() + self.case = "baseline" # no aggregator level control + # for self.mpc in self.mpc_permutations: + # for self.version in self.versions: + self.flush_redis() + self.get_homes() + self.reset_collected_data() + self.run_baseline() + self.write_outputs() diff --git a/dragg/data/config-template.toml b/dragg/data/config-template.toml index 9322111..4b0cb7f 100644 --- a/dragg/data/config-template.toml +++ b/dragg/data/config-template.toml @@ -1,89 +1,70 @@ [community] -total_number_homes = [ 10, 0,] -homes_battery = [ 0, 0,] -homes_pv = [ 3, 0,] -homes_pv_battery = [ 0, 0,] +total_number_homes = 10 +homes_battery = 0 +homes_pv = 4 +homes_pv_battery = 0 overwrite_existing = true -house_p_avg = 1.0 +house_p_avg = 1.2 + +[simulation] +start_datetime = "2015-01-01 00" +end_datetime = "2015-01-04 00" +random_seed = 12 +n_nodes = 4 +load_zone = "LZ_HOUSTON" +check_type = "all" +run_rbo_mpc = true +checkpoint_interval = "daily" +named_version = "test" + +[agg] +base_price = 0.07 +subhourly_steps = 1 +tou_enabled = true +spp_enabled = false + +[agg.rl] +action_horizon = 1 +forecast_horizon = 1 +prev_timesteps = 12 +max_rp = 0.02 [home.hvac] r_dist = [ 6.8, 9.199999999999999,] c_dist = [ 4.25, 5.75,] -p_cool_dist = [ 2, 9,] -p_heat_dist = [ 2.0, 13.5,] +p_cool_dist = [ 3.5, 3.5,] +p_heat_dist = [ 3.5, 3.5,] temp_sp_dist = [ 18, 22,] temp_deadband_dist = [ 2, 3,] [home.wh] r_dist = [ 18.7, 25.3,] -c_dist = [ 4.25, 5.75,] -p_dist = [ 5, 8,] +p_dist = [ 2.5, 2.5,] sp_dist = [ 45.5, 48.5,] deadband_dist = [ 9, 12,] size_dist = [ 200, 300,] - -[home.wh.waterdraws] -n_big_draw_dist = [ 5, 8,] -n_small_draw_dist = [ 30, 50,] -big_draw_size_dist = [ 5, 10,] -small_draw_size_dist = [ 0.2, 0.5,] +waterdraw_file = 
'100_Random_Flow_Profiles.csv' [home.battery] -max_rate = 5 -capacity = 13.5 -cap_bounds = [ 0.15, 0.85,] -charge_eff = 0.95 -discharge_eff = 0.99 -cons_penalty = 0.005 +max_rate = [3,5] +capacity = [9.0,13.5] +lower_bound = [ 0.01, 0.15] +upper_bound = [ 0.85, 0.99] +charge_eff = [0.85, 0.95] +discharge_eff = [0.97, 0.99] [home.pv] -area = 32 -efficiency = 0.2 +area = [20, 32] +efficiency = [0.15, 0.2] [home.hems] -prediction_horizon = [ 6,] -price_uncertainty = 0.3 -sub_subhourly_steps = [ 4,] -solver = "GLPK_MI" # GLPK for linear non-integer, GUROBI (Licensed) for quadratic integer, ECOS for quadratic non-integer +prediction_horizon = 6 +sub_subhourly_steps = 6 +discount_factor = 0.92 +solver = "GLPK_MI" -[simulation] -start_datetime = "2015-01-01 00" -end_datetime = "2015-03-01 00" -loop_days = true -random_seed = 12 -n_nodes = 4 -load_zone = "LZ_HOUSTON" -check_type = "all" -run_rbo_mpc = false -run_rl_agg = true -run_rl_simplified = false -checkpoint_interval = "daily" - -[rl] # for naming -version = [ "test",] - -[rl.parameters] # depricated for use with OpenAI Gym -learning_rate = [ 0.01,] -discount_factor = [ 1.0,] -batch_size = [ 4, 32,] -exploration_rate = [ 0.01,] -twin_q = false - -[rl.utility] -rl_agg_action_horizon = [ 6, 4,] -rl_agg_forecast_horizon = 1 -base_price = 0.1 -action_space = [ -1.0, 1.0,] -hourly_steps = [ 1,] -minutes_per_step = 120 -tou_enabled = false - -[rl.utility.tou] # only necessary if tou_enabled == true +[agg.tou] shoulder_times = [ 9, 21,] shoulder_price = 0.09 peak_times = [ 14, 18,] peak_price = 0.13 - -[rl.simplified] -response_rate = 0.3 -offset = 0.2 diff --git a/dragg/data/config.toml b/dragg/data/config.toml index 142efbf..4b0cb7f 100644 --- a/dragg/data/config.toml +++ b/dragg/data/config.toml @@ -1,46 +1,33 @@ [community] -total_number_homes = 5 +total_number_homes = 10 homes_battery = 0 -homes_pv = 0 -homes_pv_battery = 2 +homes_pv = 4 +homes_pv_battery = 0 overwrite_existing = true house_p_avg = 1.2 [simulation] start_datetime = "2015-01-01 00" -end_datetime = "2015-03-01 00" -loop_days = true +end_datetime = "2015-01-04 00" random_seed = 12 n_nodes = 4 load_zone = "LZ_HOUSTON" check_type = "all" -run_rbo_mpc = false -run_rl_agg = true -run_rl_simplified = false +run_rbo_mpc = true checkpoint_interval = "daily" +named_version = "test" -[rl] -version = [ "dn-cleaned", "dn-test", ] +[agg] +base_price = 0.07 +subhourly_steps = 1 +tou_enabled = true +spp_enabled = false -[rl.parameters] -learning_rate = [ 0.01,] -discount_factor = [ 1.0,] -batch_size = [ 4, 32,] -exploration_rate = [ 0.01,] -twin_q = false - -[rl.utility] -rl_agg_action_horizon = [ 6, 4,] -rl_agg_forecast_horizon = 1 -base_price = 0.1 -action_space = [ -1.0, 1.0,] -hourly_steps = [ 1,] -minutes_per_step = 120 -tou_enabled = false - -[rl.simplified] -response_rate = 0.3 -offset = 0.2 +[agg.rl] +action_horizon = 1 +forecast_horizon = 1 +prev_timesteps = 12 +max_rp = 0.02 [home.hvac] r_dist = [ 6.8, 9.199999999999999,] @@ -52,11 +39,11 @@ temp_deadband_dist = [ 2, 3,] [home.wh] r_dist = [ 18.7, 25.3,] -c_dist = [ 4.25, 5.75,] p_dist = [ 2.5, 2.5,] sp_dist = [ 45.5, 48.5,] deadband_dist = [ 9, 12,] size_dist = [ 200, 300,] +waterdraw_file = '100_Random_Flow_Profiles.csv' [home.battery] max_rate = [3,5] @@ -71,19 +58,13 @@ area = [20, 32] efficiency = [0.15, 0.2] [home.hems] -prediction_horizon = [ 6,] -price_uncertainty = 0.3 -sub_subhourly_steps = [ 6,] +prediction_horizon = 6 +sub_subhourly_steps = 6 +discount_factor = 0.92 solver = "GLPK_MI" -[rl.utility.tou] +[agg.tou] 
shoulder_times = [ 9, 21,] shoulder_price = 0.09 peak_times = [ 14, 18,] peak_price = 0.13 - -[home.wh.waterdraws] -n_big_draw_dist = [ 2, 5,] -n_small_draw_dist = [ 0, 0,] -big_draw_size_dist = [ 25, 40,] -small_draw_size_dist = [ 7.5, 15.0,] diff --git a/dragg/main.py b/dragg/main.py index 95286dc..56bc7b5 100644 --- a/dragg/main.py +++ b/dragg/main.py @@ -13,7 +13,7 @@ # include_runs = {} # add_outputs = {} - r = Reformat(mpc_params={"mpc_discomfort":[]}) - r.main() # use main to plot a suite of graphs + # r = Reformat(mpc_params={"mpc_discomfort":[]}) + # r.main() # use main to plot a suite of graphs # r.save_images() # saves the images # r.rl2baseline() # specific plots available through named methods diff --git a/dragg/mpc_calc.py b/dragg/mpc_calc.py index a689ef0..2514dd9 100644 --- a/dragg/mpc_calc.py +++ b/dragg/mpc_calc.py @@ -13,16 +13,9 @@ from dragg.redis_client import RedisClient from dragg.logger import Logger -def manage_home_forecast(home): - """ - Calls class method as a top level function (picklizable) - :return: list - """ - return home.forecast_home() - def manage_home(home): """ - Calls class method as a top level function (picklizable) + Calls class method as a top level function (picklable by pathos) :return: None """ home.run_home() @@ -32,7 +25,7 @@ class MPCCalc: def __init__(self, home): """ params - home: Dictionary with keys + home: Dictionary with keys for HVAC, WH, and optionally PV and battery parameters """ self.home = home # reset every time home retrieved from Queue self.name = home['name'] @@ -122,7 +115,6 @@ def redis_get_prev_optimal_vals(self): self.prev_optimal_vals = self.redis_client.conn.hgetall(key) def initialize_environmental_variables(self): - # get a connection to redis self.redis_client = RedisClient() # collect all values necessary @@ -132,7 +124,6 @@ self.all_spp = self.redis_client.conn.lrange('SPP', 0, -1) self.all_tou = self.redis_client.conn.lrange('tou', 0, -1) self.base_cents = float(self.all_tou[0]) - self.tracked_price = [float(i) for i in self.all_tou[:12]] # cast all values to proper type self.start_hour_index = int(float(self.start_hour_index)) @@ -151,13 +142,14 @@ def setup_base_problem(self): try: self.solver = solvers[self.home['hems']['solver']] except: - self.solver = cp.GUROBI + self.solver = cp.GLPK_MI # Set up the horizon for the MPC calc (min horizon = 1, no MPC) - self.sub_subhourly_steps = max(1, int(self.home['hems']['sub_subhourly_steps'][0])) + self.sub_subhourly_steps = max(1, int(self.home['hems']['sub_subhourly_steps'])) self.dt = max(1, int(self.home['hems']['hourly_agg_steps'])) self.horizon = max(1, int(self.home['hems']['horizon'] * self.dt)) self.h_plus = self.horizon + 1 + self.discount = float(self.home['hems']['discount_factor']) # Initialize RP structure so that non-forecasted RPs have an expected value of 0.
self.reward_price = np.zeros(self.horizon) @@ -196,7 +188,7 @@ def setup_base_problem(self): self.temp_in_max = cp.Constant(float(self.home["hvac"]["temp_in_max"])) self.t_in_init = float(self.home["hvac"]["temp_in_init"]) - self.max_load = max(self.hvac_p_c.value, self.hvac_p_h.value) + self.wh_p.value + self.max_load = (max(self.hvac_p_c.value, self.hvac_p_h.value) + self.wh_p.value) * self.sub_subhourly_steps def water_draws(self): draw_sizes = (self.horizon // self.dt + 1) * [0] + self.home["wh"]["draw_sizes"] @@ -273,6 +265,8 @@ def get_initial_conditions(self): self.water_draws() if self.timestep == 0: + self.initialize_environmental_variables() + self.temp_in_init = cp.Constant(self.t_in_init) self.temp_wh_init = cp.Constant((self.t_wh_init*(self.wh_size - self.draw_size[0]) + self.tap_temp * self.draw_size[0]) / self.wh_size) @@ -309,12 +303,10 @@ def add_base_constraints(self): if max(self.oat_current_ev) <= 30: # "winter" self.hvac_heat_max = self.sub_subhourly_steps self.hvac_cool_max = 0 - # self.constraints += [self.hvac_cool_on == 0] else: # "summer" self.hvac_heat_max = 0 self.hvac_cool_max = self.sub_subhourly_steps - # self.constraints += [self.hvac_heat_on == 0] self.constraints = [ # Indoor air temperature constraints @@ -341,7 +333,7 @@ def add_base_constraints(self): self.temp_wh_ev >= self.temp_wh_min, self.temp_wh_ev <= self.temp_wh_max, - self.temp_wh == self.temp_wh_init # !!!! significant change (dT/R) + (p/C) rather than (dT/R + p)/C + self.temp_wh == self.temp_wh_init + 3600 * (((self.temp_in_ev[1] - self.temp_wh_init) / self.wh_r) + self.wh_heat_on[0] * self.wh_p) / (self.wh_c * self.dt), self.temp_wh >= self.temp_wh_min, @@ -400,7 +392,7 @@ def set_base_p_grid(self): """ self.constraints += [ # Set grid load - self.p_grid == self.p_load # p_load = p_hvac + p_wh + self.p_grid == self.p_load # where p_load = p_hvac + p_wh ] def set_battery_only_p_grid(self): @@ -450,7 +442,7 @@ def solve_mpc(self): self.wh_weighting = 10 self.objective = cp.Variable(self.horizon) self.constraints += [self.cost == cp.multiply(self.total_price, self.p_grid)] # think this should work - self.weights = cp.Constant(np.power(0.92*np.ones(self.horizon), np.arange(self.horizon))) + self.weights = cp.Constant(np.power(self.discount*np.ones(self.horizon), np.arange(self.horizon))) self.obj = cp.Minimize(cp.sum(cp.multiply(self.cost, self.weights))) #+ self.wh_weighting * cp.sum(cp.abs(self.temp_wh_max - self.temp_wh_ev))) #cp.sum(self.temp_wh_sp - self.temp_wh_ev)) self.prob = cp.Problem(self.obj, self.constraints) if not self.prob.is_dcp(): @@ -476,8 +468,6 @@ def implement_presolve(self): + (((self.temp_in_ev[1:self.h_plus] - self.temp_wh_ev[:self.horizon]) / self.wh_r) + self.wh_heat_on * self.wh_p) / (self.wh_c * self.dt), - # self.p_load == self.sub_subhourly_steps * (self.hvac_p_c * self.hvac_cool_on + self.hvac_p_h * self.hvac_heat_on + self.wh_p * self.wh_heat_on), - self.hvac_cool_on == np.array(self.presolve_hvac_cool_on, dtype=np.double), self.hvac_heat_on == np.array(self.presolve_hvac_heat_on, dtype=np.double), self.wh_heat_on == np.array(self.presolve_wh_heat_on, dtype=np.double) @@ -643,11 +633,7 @@ def cast_redis_curr_rps(self): :return: None """ rp = self.redis_client.conn.lrange('reward_price', 0, -1) - # num_agg_steps_seen = int(np.ceil(self.horizon / self.sub_subhourly_steps)) - # self.reward_price[:min(len(rp), num_agg_steps_seen)] = rp[:min(len(rp), num_agg_steps_seen)] self.reward_price = rp[:self.horizon] - self.tracked_price[:-1] = self.tracked_price[1:] - 
self.tracked_price[0] = self.reward_price[0] self.log.info(f"ts: {self.timestep}; RP: {self.reward_price[0]}") def solve_type_problem(self): diff --git a/dragg/reformat.py b/dragg/reformat.py index 3d033d0..1c2d9b1 100755 --- a/dragg/reformat.py +++ b/dragg/reformat.py @@ -18,55 +18,32 @@ from dragg.logger import Logger class Reformat: - def __init__(self, add_outputs={}, agg_params={"rl_horizon":[1]}, mpc_params={}, versions=set([0.0]), date_ranges={"end_datetime":[]}, include_runs={}, log=Logger("reformat")): - self.ref_log = log - self.data_dir = os.path.expanduser(os.environ.get('DATA_DIR',' data')) - # self.outputs_dir = set() - self.outputs_dir = {"outputs"} - if len(self.outputs_dir) == 0: - self.ref_log.logger.error("No outputs directory found.") + def __init__(self): + self.log = Logger("reformat") + + self.data_dir = os.path.expanduser(os.environ.get('DATA_DIR','data')) + self.outputs_dir = os.path.expanduser(os.environ.get('OUTPUT_DIR','outputs')) + if not os.path.isdir(self.outputs_dir): + self.log.logger.error("No outputs directory found.") quit() self.config_file = os.path.join(self.data_dir, os.environ.get('CONFIG_FILE', 'config.toml')) self.config = self._import_config() - self.include_runs = include_runs - self.versions = versions - - self.date_folders = self.set_date_folders(date_ranges) - self.mpc_folders = self.set_mpc_folders(mpc_params) - self.baselines = self.set_base_file() - self.parametrics = [] - self.parametrics = self.set_parametric_files(agg_params) + self.add_date_ranges() + self.add_mpc_params() + self.date_folders = self.set_date_folders() + self.mpc_folders = self.set_mpc_folders() + self.files = self.set_files() - np.random.seed(self.config['simulation']['random_seed']) self.fig_list = None self.save_path = os.path.join('outputs', 'images', datetime.now().strftime("%m%dT%H%M%S")) def main(self): - if self.config['simulation']['run_rl_agg'] or self.config['simulation']['run_rbo_mpc']: - # put a list of plotting functions here - self.sample_home = "Crystal-RXXFA" - self.plots = [self.rl2baseline, - self.rl2baseline_error, - self.plot_single_home] - - if self.config['simulation']['run_rl_simplified']: - # put a list of plotting functions here - self.plots = [self.rl_simplified, - self.rl_simplified_rp, - self.all_rps] - - self.images = self.plot_all() - - def tf_main(self): - """ Intended for plotting an image suite for use with the tensorflow reinforcement learning package. 
""" + # put a list of plotting functions here self.sample_home = "Crystal-RXXFA" self.plots = [self.rl2baseline, - self.rl2baseline_error, self.plot_single_home] - self.plot_all_homes() - self.images = self.plot_all() def plot_all(self, save_images=False): @@ -75,243 +52,128 @@ def plot_all(self, save_images=False): fig = make_subplots(specs=[[{"secondary_y": True}]]) fig.update_layout( font=dict( - # family="Courier New, monospace", - size=22, + size=65, ) ) + fig.update_xaxes( + title_standoff=80 + ) + fig.update_yaxes( + title_standoff=60 + ) fig = plot(fig) fig.show() figs += [fig] return figs - def create_summary(self, file): - with open(file) as f: - data = json.load(f) - - p_grid_agg = [] - for k,v in data.items(): - p_grid_agg.append(v['p_grid_opt']) - p_grid_agg = np.sum(p_grid_agg, axis=0).tolist() - - p_grid_setpoint = (np.ones(len(p_grid_agg)-1) * self.config['community']['total_number_homes'][0] * self.config['community']['house_p_avg']).tolist() - - summary = {"p_grid_aggregate": p_grid_agg, "p_grid_setpoint": p_grid_setpoint} - data["Summary"] = summary - - with open(file, 'w+') as f: - json.dump(data, f, indent=4) - - return data - def save_images(self): if not os.path.isdir(self.save_path): os.makedirs(self.save_path) for img in self.images: - self.ref_log.logger.info(f"Saving images of outputs to timestamped folder at {self.save_path}.") + self.log.logger.info(f"Saving images of outputs to timestamped folder at {self.save_path}.") try: path = os.path.join(self.save_path, f"{img.layout.title.text}.png") pio.write_image(img, path, width=1024, height=768) except: - self.ref_log.logger.error("Could not save plotly image(s) to outputs directory.") + self.log.logger.error("Could not save plotly image(s) to outputs directory.") - def add_date_ranges(self, additional_params): - start_dates = [datetime.strptime(self.config['simulation']['start_datetime'], '%Y-%m-%d %H')] + def add_date_ranges(self): + start_dates = set([datetime.strptime(self.config['simulation']['start_datetime'], '%Y-%m-%d %H')]) end_dates = set([datetime.strptime(self.config['simulation']['end_datetime'], '%Y-%m-%d %H')]) temp = {"start_datetime": start_dates, "end_datetime": end_dates} - for key in temp: - if key in additional_params: - for i in additional_params[key]: - temp[key].add(datetime.strptime(i, '%Y-%m-%d %H')) self.date_ranges = temp - def add_agg_params(self, additional_params): - alphas = set(self.config['rl']['parameters']['learning_rate']) - epsilons = set(self.config['rl']['parameters']['exploration_rate']) - betas = set(self.config['rl']['parameters']['discount_factor']) - batch_sizes = set(self.config['rl']['parameters']['batch_size']) - rl_horizons = set(self.config['rl']['utility']['rl_agg_action_horizon']) - rl_interval = set(self.config['rl']['utility']['hourly_steps']) - temp = {"alpha": alphas, "epsilon": epsilons, "beta": betas, "batch_size": batch_sizes, "rl_horizon": rl_horizons, "rl_interval": rl_interval} - for key in temp: - if key in additional_params: - temp[key] |= set(additional_params[key]) - self.agg_params = temp - - def add_mpc_params(self, additional_params): - n_houses = self.config['community']['total_number_homes'][0] + def add_mpc_params(self): + n_houses = self.config['community']['total_number_homes'] mpc_horizon = self.config['home']['hems']['prediction_horizon'] dt = self.config['home']['hems']['sub_subhourly_steps'] - # for i in self.config['rl']['utility']['hourly_steps']: - # for j in self.config['home']['hems']['sub_subhourly_steps']: - # dt.append(60 // i // 
j) + solver = self.config['home']['hems']['solver'] check_type = self.config['simulation']['check_type'] - temp = {"n_houses": set([n_houses]), "mpc_prediction_horizons": set(mpc_horizon), "mpc_hourly_steps": set(dt), "check_type": set([check_type])} - for key in temp: - if key in additional_params: - temp[key] |= set(additional_params[key]) + agg_interval = self.config['agg']['subhourly_steps'] + temp = {"n_houses": set([n_houses]), "mpc_prediction_horizons": set([mpc_horizon]), "mpc_hourly_steps": set([dt]), "check_type": set([check_type]), "agg_interval": set([agg_interval]), "solver": set([solver])} + # for key in temp: + # if key in additional_params: + # temp[key] |= set(additional_params[key]) self.mpc_params = temp - self.versions |= set(self.config['rl']['version']) + self.versions = set([self.config['simulation']['named_version']]) - def set_date_folders(self, additional_params): - self.add_date_ranges(additional_params) + def set_date_folders(self): temp = [] - self.date_ranges['mpc_steps'] = set(self.config['home']['hems']['sub_subhourly_steps']) - self.date_ranges['rl_steps'] = set(self.config['rl']['utility']['hourly_steps']) + # self.date_ranges['mpc_steps'] = set([self.config['home']['hems']['sub_subhourly_steps']]) + # self.date_ranges['rl_steps'] = set([self.config['agg']['subhourly_steps']]) keys, values = zip(*self.date_ranges.items()) permutations = [dict(zip(keys, v)) for v in it.product(*values)] permutations = sorted(permutations, key=lambda i: i['end_datetime'], reverse=True) - for j in self.outputs_dir: - for i in permutations: - date_folder = os.path.join(j, f"{i['start_datetime'].strftime('%Y-%m-%dT%H')}_{i['end_datetime'].strftime('%Y-%m-%dT%H')}_{60 // i['rl_steps']}-{60 // i['rl_steps'] // i['mpc_steps']}") - print(date_folder) - if os.path.isdir(date_folder): - hours = i['end_datetime'] - i['start_datetime'] - hours = int(hours.total_seconds() / 3600) - timesteps = hours * i['rl_steps'] - print(hours, i['rl_steps']) - minutes = 60 // i['rl_steps'] - x_lims = [i['start_datetime'] + timedelta(minutes=minutes*x) for x in range(timesteps)] + for i in permutations: + date_folder = os.path.join(self.outputs_dir, f"{i['start_datetime'].strftime('%Y-%m-%dT%H')}_{i['end_datetime'].strftime('%Y-%m-%dT%H')}") + self.log.logger.info(f"Looking for files in: {date_folder}.") + if os.path.isdir(date_folder): + hours = i['end_datetime'] - i['start_datetime'] + hours = int(hours.total_seconds() / 3600) - new_folder = {"folder": date_folder, "hours": hours, "start_dt": i['start_datetime'], "name": j+" ", "ts": timesteps, "x_lims": x_lims, "agg_dt": i['rl_steps']} - temp.append(new_folder) + new_folder = {"folder": date_folder, "hours": hours, "start_dt": i['start_datetime']} + temp.append(new_folder) if len(temp) == 0: - self.ref_log.logger.error("No files found for the date ranges specified.") + self.log.logger.error("No files found for the date ranges specified.") exit() return temp - def set_mpc_folders(self, additional_params): - self.add_mpc_params(additional_params) + def set_mpc_folders(self): temp = [] keys, values = zip(*self.mpc_params.items()) permutations = [dict(zip(keys, v)) for v in it.product(*values)] for j in self.date_folders: - # for k in self.config['rl']['utility']['hourly_steps']: for i in permutations: - mpc_folder = os.path.join(j["folder"], f"{i['check_type']}-homes_{i['n_houses']}-horizon_{i['mpc_prediction_horizons']}-interval_{60 // i['mpc_hourly_steps'] // j['agg_dt']}") + mpc_folder = os.path.join(j["folder"], 
f"{i['check_type']}-homes_{i['n_houses']}-horizon_{i['mpc_prediction_horizons']}-interval_{60 // i['agg_interval']}-{60 // i['mpc_hourly_steps'] // i['agg_interval']}-solver_{i['solver']}") if os.path.isdir(mpc_folder): - # timesteps = j['hours'] * i['mpc_hourly_steps'] * k - # minutes = 60 // i['mpc_hourly_steps'] // k - # x_lims = [j['start_dt'] + timedelta(minutes=minutes*x) for x in range(timesteps)] - name = j['name'] - set = {'path': mpc_folder, 'dt': i['mpc_hourly_steps'], 'ts': j['ts'], 'x_lims': j['x_lims'], 'name': name, "agg_dt":j['agg_dt']} + timesteps = j['hours'] * i['agg_interval'] + minutes = 60 // i['agg_interval'] + x_lims = [j['start_dt'] + timedelta(minutes=minutes*x) for x in range(timesteps)] + + set = {'path': mpc_folder, 'agg_dt': i['agg_interval'], 'ts': timesteps, 'x_lims': x_lims,} if not mpc_folder in temp: temp.append(set) for x in temp: print(x['path']) return temp - def set_base_file(self): + def set_files(self): temp = [] keys, values = zip(*self.mpc_params.items()) permutations = [dict(zip(keys, v)) for v in it.product(*values)] + + color_families = [['rgb(204,236,230)','rgb(153,216,201)','rgb(102,194,164)','rgb(65,174,118)','rgb(35,139,69)','rgb(0,88,36)'], + ['rgb(191,211,230)','rgb(158,188,218)','rgb(140,150,198)','rgb(140,107,177)','rgb(136,65,157)','rgb(110,1,107)'], + ['rgb(217,217,217)','rgb(189,189,189)','rgb(150,150,150)','rgb(115,115,115)','rgb(82,82,82)','rgb(37,37,37)'], + ['rgb(253,208,162)','rgb(253,174,107)','rgb(253,141,60)','rgb(241,105,19)','rgb(217,72,1)','rgb(140,45,4)'],] + c = 0 + d = 0 + dash = ["solid", "dash", "dot", "dashdot"] for j in self.mpc_folders: path = j['path'] for i in permutations: for k in self.versions: - file = os.path.join(path, "baseline", f"baseline_version-{k}-results.json") - self.ref_log.logger.debug(f"Looking for baseline file at {file}") - if os.path.isfile(file): - name = f"Baseline - {j['name']} - v{k}" - with open(file) as f: - data = json.load(f) - if "Summary" not in data: - self.create_summary(file) - set = {"results": file, "name": name, "parent": j} - temp.append(set) - self.ref_log.logger.info(f"Adding baseline file at {file}") - - return temp - - def set_rl_files(self, additional_params): - temp = [] - names = [] - self.add_agg_params(additional_params) - x = 0 - for i in self.mpc_folders: - path = i['path'] - rl_agg_folder = os.path.join(path, "rl_agg") - all_params = {**self.agg_params, **self.mpc_params} - keys, values = zip(*all_params.items()) - permutations = [dict(zip(keys, v)) for v in it.product(*values)] - for j in permutations: - for vers in self.versions: - if os.path.isdir(rl_agg_folder): - rl_agg_path = f"agg_horizon_{j['rl_horizon']}-interval_{60 // j['rl_interval']}-alpha_{j['alpha']}-epsilon_{j['epsilon']}-beta_{j['beta']}_batch-{j['batch_size']}_version-{vers}" - rl_agg_file = os.path.join(rl_agg_folder, rl_agg_path, "results.json") - self.ref_log.logger.debug(f"Looking for a RL aggregator file at {rl_agg_file}") - if os.path.isfile(rl_agg_file): - q_results = os.path.join(rl_agg_path, "q-results.json") - q_file = os.path.join(rl_agg_folder, q_results) - # name = i['name'] - name = "" - # for k,v in j.items(): - # if len(all_params[k]) > 1 or k == "mpc_hourly_steps": - # name += f"{k} = {v}, " - # name = f"horizon={j['rl_horizon']}, alpha={j['alpha']}, beta={j['beta']}, epsilon={j['epsilon']}, batch={j['batch_size']}, disutil={j['mpc_disutility']}, discomf={j['mpc_discomfort']}" - name += f"v = {vers}" - with open(rl_agg_file) as f: - data = json.load(f) - if "Summary" not in data: 
- self.create_summary(rl_agg_file) - set = {"results": rl_agg_file, "q_results": q_file, "name": name, "parent": i, "rl_agg_action_horizon": j["rl_horizon"], "params": j, "skip": j['rl_interval'] // i['dt'], "color":plotly.colors.qualitative.Dark24[x]} - if not name in names: - temp.append(set) - names.append(name) - self.ref_log.logger.info(f"Adding an RL aggregator agent file at {rl_agg_file}") - x += 1 - - if len(temp) == 0: - self.ref_log.logger.warning("Parameterized RL aggregator runs are empty for this config file.") - - return temp - - def set_simplified_files(self, additional_params): - temp = [] - self.add_agg_params(additional_params) - for i in self.mpc_folders: - path = i['path'] - simplified_folder = os.path.join(path, "simplified") - all_params = {**self.agg_params, **self.mpc_params} - keys, values = zip(*all_params.items()) - permutations = [dict(zip(keys, v)) for v in it.product(*values)] - for j in permutations: - for vers in self.versions: - if os.path.isdir(simplified_folder): - simplified_path = f"agg_horizon_{j['rl_horizon']}-alpha_{j['alpha']}-epsilon_{j['epsilon']}-beta_{j['beta']}_batch-{j['batch_size']}_version-{vers}" - simplified_file = os.path.join(simplified_folder, simplified_path, "results.json") - if os.path.isfile(simplified_file): - q_file = os.path.join(simplified_folder, simplified_path, "q-results.json") - if os.path.isfile(q_file): - # name = i['name'] - name = "" - for k,v in j.items(): - if len(all_params[k]) > 1: - name += f"{k} = {v}, " - set = {"results": simplified_file, "q_results": q_file, "name": name, "parent": i} - temp.append(set) + dir = os.path.join(path, f"version-{k}") + for case_dir in os.listdir(dir): + file = os.path.join(dir, case_dir, "results.json") + if os.path.isfile(file): + name = f"{case_dir}, v = {k}" + set = {"results": file, "name": name, "parent": j, "color": color_families[c][d], "dash":dash[c]} + temp.append(set) + self.log.logger.info(f"Adding baseline file at {file}") + d = (d + 1) % len(color_families[c]) + c = (c + 1) % len(color_families) return temp - def set_parametric_files(self, additional_params): - if self.config['simulation']['run_rl_agg'] or "rl_agg" in self.include_runs: - self.parametrics += self.set_rl_files(additional_params) - - if self.config['simulation']['run_rl_simplified'] or "simplified" in self.include_runs: - self.parametrics += self.set_simplified_files(additional_params) - - return self.parametrics - - def set_other_files(self, otherfile): - self.parametrics.append(otherfile) - - def _type_list(self, type): + def get_type_list(self, type): type_list = set([]) i = 0 - for file in (self.baselines + self.parametrics): + for file in self.files: with open(file["results"]) as f: data = json.load(f) @@ -328,11 +190,12 @@ def _type_list(self, type): else: type_list = type_list.intersection(temp) + self.log.logger.info(f"{len(type_list)} homes found of type {type}: {type_list}") return type_list def _import_config(self): if not os.path.exists(self.config_file): - self.ref_log.logger.error(f"Configuration file does not exist: {self.config_file}") + self.log.logger.error(f"Configuration file does not exist: {self.config_file}") sys.exit(1) with open(self.config_file, 'r') as f: @@ -342,14 +205,13 @@ def _import_config(self): def plot_environmental_values(self, name, fig, summary, file, fname): fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=summary["OAT"][0:file["parent"]["ts"]], name=f"OAT (C)", visible='legendonly')) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], 
y=summary["GHI"][0:file["parent"]["ts"]], name=f"GHI (W/m2)", visible='legendonly')) + fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=summary["GHI"][0:file["parent"]["ts"]], name=f"GHI", line={'color':'goldenrod', 'width':8}, visible='legendonly')) fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=summary["TOU"][0:file["parent"]["ts"]], name=f"TOU Price ($/kWh)", line_shape='hv', visible='legendonly'), secondary_y=True) fig = self.plot_thermal_bounds(fig, file['parent']['x_lims'], name, fname) return fig def plot_thermal_bounds(self, fig, x_lims, name, fname): - for i in self.outputs_dir: - ah_file = os.path.join(i, f"all_homes-{self.config['community']['total_number_homes'][0]}-config.json") + ah_file = os.path.join(self.outputs_dir, f"all_homes-{self.config['community']['total_number_homes']}-config.json") with open(ah_file) as f: data = json.load(f) @@ -357,38 +219,32 @@ def plot_thermal_bounds(self, fig, x_lims, name, fname): if dict['name'] == name: data = dict - fig.add_trace(go.Scatter(x=x_lims, y=data['hvac']['temp_in_min'] * np.ones(len(x_lims)), name=f"Tin_min(C)", fill=None, mode='lines', line_color='indigo')) - fig.add_trace(go.Scatter(x=x_lims, y=data['hvac']['temp_in_max'] * np.ones(len(x_lims)), name=f"Tin_max", fill='tonexty' , mode='lines', line_color='indigo')) + fig.add_trace(go.Scatter(x=x_lims, y=data['hvac']['temp_in_min'] * np.ones(len(x_lims)), name=f"Tin_min", fill=None, showlegend=False, mode='lines', line_color='lightsteelblue')) + fig.add_trace(go.Scatter(x=x_lims, y=data['hvac']['temp_in_max'] * np.ones(len(x_lims)), name=f"Tin_bounds", fill='tonexty' , mode='lines', line_color='lightsteelblue')) - fig.add_trace(go.Scatter(x=x_lims, y=data['wh']['temp_wh_min'] * np.ones(len(x_lims)), name=f"Twh_min(C)", fill=None, mode='lines', line_color='red')) - fig.add_trace(go.Scatter(x=x_lims, y=data['wh']['temp_wh_max'] * np.ones(len(x_lims)), name=f"Twh_max", fill='tonexty' , mode='lines', line_color='red')) + fig.add_trace(go.Scatter(x=x_lims, y=data['wh']['temp_wh_min'] * np.ones(len(x_lims)), name=f"Twh_min", fill=None, showlegend=False, mode='lines', line_color='pink')) + fig.add_trace(go.Scatter(x=x_lims, y=data['wh']['temp_wh_max'] * np.ones(len(x_lims)), name=f"Twh_bounds", fill='tonexty' , mode='lines', line_color='pink')) return fig def plot_base_home(self, name, fig, data, summary, fname, file, plot_price=True): - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["temp_in_opt"], name=f"Tin (C) - {fname}")) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["temp_wh_opt"], name=f"Twh (C) - {fname}")) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["waterdraws"], name=f"Waterdraws (L) - {fname}")) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.average(data["waterdraws"])*np.ones(len(data["waterdraws"])), name=f"Avg Waterdraws (L) - {fname}")) - - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["p_grid_opt"], name=f"Pgrid (kW) - {fname}", line_shape='hv', visible='legendonly')) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["p_load_opt"], name=f"Pload (kW) - {fname}", line_shape='hv', visible='legendonly')) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["hvac_cool_on_opt"], name=f"HVAC Cool Cmd - {fname}", line_shape='hv', visible='legendonly'), secondary_y=True) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["hvac_heat_on_opt"], name=f"HVAC Heat Cmd - {fname}", line_shape='hv', visible='legendonly'), secondary_y=True) - 
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["wh_heat_on_opt"], name=f"WH Heat Cmd - {fname}", line_shape='hv', visible='legendonly'), secondary_y=True)
-        if plot_price:
-            actual_price = np.add(summary["TOU"][:len(summary['RP'])], summary["RP"])
-            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=actual_price, name=f"Actual Price ($/kWh) - {fname}", visible='legendonly'), secondary_y=True)
-            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.divide(np.cumsum(actual_price), np.arange(len(actual_price))+1), name=f"Average Actual Price ($/kWh) - {fname}", visible='legendonly'), secondary_y=True)
-        try:
-            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data['correct_solve'], name=f"Correct Solve - {fname}", line_shape='hv', visible='legendonly'), secondary_y=True)
-            self.cost_dict[name][fname] = np.sum(data['cost_opt'])
-            self.cs_dict[name][fname] = np.sum(data['correct_solve'])
-        except:
-            pass
+        fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["temp_in_opt"], name=f"Tin - {fname}", legendgroup='tin', line={'color':'blue', 'width':8, 'dash':file['dash']}))
+        fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["temp_wh_opt"], showlegend=True, legendgroup='twh', name=f"Twh - {fname}", line={'color':'firebrick', 'width':8, 'dash':file['dash']}))
+
+        fig.update_layout(legend=dict(
+            yanchor="top",
+            y=0.99,
+            xanchor="left",
+            x=0.03,
+            font=dict(
+                size=65),
+            ),
+            yaxis_title="Temperature (deg C)"
+        )
+
         return fig
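# The reworked plot_base_home above keys every run off a shared color plus a
# per-file dash style, and tags the Tin/Twh traces with `legendgroup` so that
# one legend click toggles all traces in the same group at once. A minimal
# standalone sketch of that pattern (run names and data values are fabricated):
import plotly.graph_objects as go

fig = go.Figure()
for run, dash in [("baseline", "solid"), ("tou", "dash")]:
    fig.add_trace(go.Scatter(y=[21.0, 21.5, 21.2], name=f"Tin - {run}",
                             legendgroup="tin", line={"color": "blue", "dash": dash}))
    fig.add_trace(go.Scatter(y=[45.0, 46.0, 44.5], name=f"Twh - {run}",
                             legendgroup="twh", line={"color": "firebrick", "dash": dash}))
# fig.show()  # uncomment to render locally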
 
     def plot_pv(self, name, fig, data, fname, file):
-        fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["p_pv_opt"], name=f"Ppv (kW) - {fname}", line_shape='hv', visible='legendonly'))
+        fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["p_pv_opt"], name="Ppv (kW)", line_color='orange', line_shape='hv', visible='legendonly'))
         fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["u_pv_curt_opt"], name=f"U_pv_curt (kW) - {fname}", line_shape='hv', visible='legendonly'))
         return fig
 
@@ -402,21 +258,21 @@ def plot_single_home(self, fig):
         if self.sample_home is None:
             if type is None:
                 type = "base"
-                self.ref_log.logger.warning("Specify a home type or name. Proceeding with home of type: \"base\".")
+                self.log.logger.warning("Specify a home type or name. Proceeding with home of type: \"base\".")
 
-            type_list = self._type_list(type)
+            type_list = self.get_type_list(type)
             self.sample_home = random.sample(type_list,1)[0]
-            self.ref_log.logger.info(f"Proceeding with home: {name}")
+            self.log.logger.info(f"Proceeding with home: {self.sample_home}")
 
         flag = False
-        for file in (self.baselines + self.parametrics):
+        for file in self.files:
             with open(file["results"]) as f:
                 comm_data = json.load(f)
 
             try:
                 data = comm_data[self.sample_home]
             except:
-                self.ref_log.logger.error(f"No home with name: {self.sample_home}")
+                self.log.logger.error(f"No home with name: {self.sample_home}")
                 return
 
             type = data["type"]
@@ -426,117 +282,153 @@ def plot_single_home(self, fig):
                 fig = self.plot_environmental_values(self.sample_home, fig, summary, file, file["name"])
                 flag = True
 
-            fig = self.plot_base_home(self.sample_home, fig, data, summary, file["name"], file)
-
             fig.update_xaxes(title_text="Time of Day (hour)")
             fig.update_layout(title_text=f"{self.sample_home} - {type} type")
+            fig = self.plot_base_home(self.sample_home, fig, data, summary, file["name"], file)
+
             if 'pv' in type:
                 fig = self.plot_pv(self.sample_home, fig, data, file["name"], file)
 
-            if 'battery' in type:
+            if 'batt' in type:
                 fig = self.plot_battery(self.sample_home, fig, data, file["name"], file)
 
         return fig
 
     def plot_all_homes(self, fig=None):
-        t = PrettyTable(["Home"]+[file['name'] for file in self.parametrics])
-        u = PrettyTable(["Home"]+[file['name'] for file in self.parametrics])
         homes = ["Crystal-RXXFA","Myles-XQ5IA","Lillie-NMHUH","Robert-2D73X","Serena-98EPE","Gary-U95TS","Bruno-PVRNB","Dorothy-9XMNY","Jason-INS3S","Alvin-4BAYB",]
-        self.cost_dict = {}
-        self.cs_dict = {}
         for self.sample_home in homes:
-            self.cost_dict[self.sample_home] = {}
-            self.cs_dict[self.sample_home] = {}
             fig = make_subplots(specs=[[{"secondary_y": True}]])
             fig.update_layout(
                 font=dict(
-                    # family="Courier New, monospace",
-                    size=22,
+                    size = 12
                 )
             )
            fig = self.plot_single_home(fig)
-            t.add_row([self.sample_home] + [self.cost_dict[self.sample_home][file['name']] for file in self.parametrics])
-            u.add_row([self.sample_home] + [self.cs_dict[self.sample_home][file['name']] for file in self.parametrics])
-
            fig.show()
-            print(t)
-        # u.add_row(["average"] + [np.average([self.cs_dict[sh][file['name']] for sh in homes]) for file in self.parametrics]))
-        print(u)
-        return
 
-    def rl_simplified(self):
-        flag = False
+        return
 
-        for file in self.parametrics:
-            with open(file['results']) as f:
+    def plot_baseline(self, fig):
+        for file in self.files:
+            with open(file["results"]) as f:
                 data = json.load(f)
 
-            if flag == False:
-                fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["Summary"]["p_grid_setpoint"], name=f"Aggregate Load Setpoint"))
-                setpoint = np.array(data["Summary"]["p_grid_setpoint"])
-                flag = True
-            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["Summary"]["p_grid_aggregate"], name=f"Aggregate Load - {file['name']}"))
-            agg = np.array(data["Summary"]["p_grid_aggregate"])
-            error = np.subtract(agg, 50*np.ones(len(agg)))
-            # fig1.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.cumsum(np.square(error)), name=f"L2 Norm Error {file['name']}"))
-            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.cumsum(abs(error)), name=f"Cummulative Error - {file['name']}"))
-            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=abs(error), name=f"Abs Error - {file['name']}"))
-        fig.update_layout(title_text="Aggregate Load")
 
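# The legendonly traces added just below plot a cumulative and a running-average
# aggregate load. A self-contained sketch of the same arithmetic (the sample
# values and agg_dt here are fabricated for illustration):
import numpy as np

agg_dt = 4                                   # subhourly steps per hour, hypothetical
p_grid = np.array([40.0, 55.0, 50.0, 65.0])  # kW at each step, hypothetical

energy_kwh = np.cumsum(np.divide(p_grid, agg_dt))                       # kW per step summed into kWh
running_avg = np.divide(np.cumsum(p_grid), np.arange(len(p_grid)) + 1)  # running mean in kW

assert np.isclose(running_avg[-1], p_grid.mean())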
y=data["Summary"]["p_grid_aggregate"], name=f"Agg Load - {file['name']}", line_shape='hv', line={'color':file['color'], 'width':4, 'dash':'solid'})) + fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.cumsum(np.divide(data["Summary"]["p_grid_aggregate"], file['parent']['agg_dt'])), name=f"Cumulative Agg Load - {file['name']}", line_shape='hv', visible='legendonly', line={'color':file['color'], 'width':4, 'dash':'dash'})) + fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.divide(np.cumsum(data["Summary"]["p_grid_aggregate"]), np.arange(ts + 1) + 1), name=f"Avg Cumulative Agg Load - {file['name']}", line_shape='hv', visible='legendonly', line={'color':file['color'], 'width':4, 'dash':'dashdot'})) return fig - def rl_simplified_rp(self, fig=None): - for file in self.parametrics: - with open(file['results']) as f: + def plot_typ_day(self, fig): + rl_counter = 0 + tou_counter = 0 + dn_counter = 0 + for file in self.files: + flag = True + + with open(file["results"]) as f: data = json.load(f) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["Summary"]["RP"], name=f"Reward Price Signal - {file['name']}")) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.divide(np.cumsum(data["Summary"]["RP"]), np.arange(file['parent']['ts']) + 1), name=f"Rolling Average Reward Price - {file['name']}")) - fig = self.plot_mu(fig) - fig.update_layout(title_text="Reward Price Signal") - return fig - def plot_mu(self, fig): - for file in self.parametrics: + name = file["name"] - with open(file['results']) as f: - data = json.load(f) + ts = len(data['Summary']['p_grid_aggregate'])-1 + rl_setpoint = data['Summary']['p_grid_setpoint'] + if 'clipped' in file['name']: + rl_setpoint = np.clip(rl_setpoint, 45, 60) + loads = np.array(data["Summary"]["p_grid_aggregate"]) + loads = loads[:len(loads) // (24*file['parent']['agg_dt']) * 24 * file['parent']['agg_dt']] + if len(loads) > 24: + daily_max_loads = np.repeat(np.amax(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt']) + daily_min_loads = np.repeat(np.amin(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt']) + daily_range_loads = np.subtract(daily_max_loads, daily_min_loads) + daily_range_loads = [abs(loads[max(i-6, 0)] - loads[min(i+6, len(loads)-1)]) for i in range(len(loads))] + daily_avg_loads = np.repeat(np.mean(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt']) + daily_std_loads = np.repeat(np.std(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt']) + daily_std_loads = [np.std(loads[max(i-6, 0):i+6]) for i in range(len(loads))] - try: - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["Summary"]["RP"], name=f"RP (Selected Action)", line_shape='hv')) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.divide(np.cumsum(data["Summary"]["RP"]), np.arange(file['parent']['ts']) + 1), name=f"Rolling Average Reward Price - {file['name']}")) - except: - self.ref_log.logger.warning("Could not find data on the selected action") - with open(file['q_results']) as f: - data = json.load(f) + composite_day = np.average(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=0) + fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=composite_day, name=f"{name}", opacity=0.5, showlegend=flag, line={'color':clr, 'width':8, 'dash':dash})) + + fig.update_layout(legend=dict( + yanchor="top", + y=0.45, + xanchor="left", + x=0.7 + )) + + fig.update_layout( + font=dict( + # family="Courier New, 
monospace", + size=65, + ), + title="Avg Daily Load Profile", + xaxis_title="Time of Day", + yaxis_title="Agg. Demand (kW)" + ) + + fig.update_xaxes( + title_standoff=80 + ) + fig.update_yaxes( + title_standoff=60 + ) - mus =[] - for agent in data: - agent_data = data[agent] - mu = np.array(agent_data["mu"]) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=mu, name=f"Mu (Assumed Best Action) - {file['name']} - {agent}")) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=mu + file['params']['epsilon'], name=f"Mu +1 std dev - {file['name']} - {agent}", fill=None , mode='lines', line_color=plotly.colors.sequential.Blues[3])) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=mu - file['params']['epsilon'], name=f"Mu -1 std dev - {file['name']} - {agent}", fill='tonexty' , mode='lines', line_color=plotly.colors.sequential.Blues[3])) - if len(mu) > 0: - mus.append(mu) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.sum(mus, axis=0), name=f"Total Mu (RP without noise) - {file['name']}")) - fig.update_layout(yaxis = {'exponentformat':'e'}) - fig.update_layout(title_text = "Reward Price Signal") return fig - def plot_baseline(self, fig): - for file in self.baselines: + def plot_max_and_12hravg(self, fig): + for file in self.files: + # all_avgs.add_column() + clr = file['color'] + with open(file["results"]) as f: data = json.load(f) + name = file["name"] ts = len(data['Summary']['p_grid_aggregate'])-1 - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["Summary"]["p_grid_aggregate"], name=f"Agg Load - {file['name']}", line_shape='hv')) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.cumsum(np.divide(data["Summary"]["p_grid_aggregate"], file['parent']['agg_dt'])), name=f"Cumulative Agg Load - {file['name']}", line_shape='hv', visible='legendonly')) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.divide(np.cumsum(data["Summary"]["p_grid_aggregate"]), np.arange(ts) + 1), name=f"Avg Cumulative Agg Load - {file['name']}", line_shape='hv', visible='legendonly')) + rl_setpoint = data['Summary']['p_grid_setpoint'] + if 'clipped' in file['name']: + rl_setpoint = np.clip(rl_setpoint, 45, 60) + loads = np.array(data["Summary"]["p_grid_aggregate"]) + loads = loads[:len(loads) // (24*file['parent']['agg_dt']) * 24 * file['parent']['agg_dt']] + if len(loads) > 24: + daily_max_loads = np.repeat(np.amax(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt']) + daily_min_loads = np.repeat(np.amin(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt']) + daily_range_loads = np.subtract(daily_max_loads, daily_min_loads) + daily_range_loads = [abs(loads[max(i-6, 0)] - loads[min(i+6, len(loads)-1)]) for i in range(len(loads))] + daily_avg_loads = np.repeat(np.mean(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt']) + daily_std_loads = np.repeat(np.std(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt']) + + fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_max_loads, name=f"{name} - Daily Max", line_shape='hv', opacity=1, legendgroup="first", line={'color':'firebrick', 'dash':dash, 'width':8})) + fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=rl_setpoint, name=f"{name} - 12 Hr Avg", opacity=0.5, legendgroup="second", line={'color':'blue', 'dash':dash, 'width':8})) + + fig.update_layout(legend=dict( + yanchor="top", + y=0.8, + xanchor="left", + x=0.7 + )) + + fig.update_layout( + font=dict( + size=65, + ), + 
title="12 Hour Avg and Daily Max", + yaxis_title="Agg. Demand (kW)" + ) + + fig.update_xaxes( + title_standoff=80 + ) + fig.update_yaxes( + title_standoff=60 + ) + return fig + def plot_parametric(self, fig): - all_daily_stats = PrettyTable(['run name', 'avg daily max', 'overall max', 'avg daily range']) - for file in self.parametrics: - # all_avgs.add_column() + all_daily_stats = PrettyTable(['run name', 'avg daily max', 'std daily max','overall max', 'avg daily range']) + for file in self.files: clr = file['color'] with open(file["results"]) as f: @@ -549,212 +441,52 @@ def plot_parametric(self, fig): rl_setpoint = np.clip(rl_setpoint, 45, 60) loads = np.array(data["Summary"]["p_grid_aggregate"]) loads = loads[:len(loads) // (24*file['parent']['agg_dt']) * 24 * file['parent']['agg_dt']] - if len(loads) > 24: + if len(loads) >= 24: daily_max_loads = np.repeat(np.amax(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt']) daily_min_loads = np.repeat(np.amin(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt']) daily_range_loads = np.subtract(daily_max_loads, daily_min_loads) + daily_range_loads = [abs(loads[max(i-6, 0)] - loads[min(i+6, len(loads)-1)]) for i in range(len(loads))] daily_avg_loads = np.repeat(np.mean(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt']) - # daily_std_loads = np.repeat(np.std(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt']) daily_std_loads = [np.std(loads[max(i-6, 0):i+6]) for i in range(len(loads))] - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=rl_setpoint, name=f"RL Setpoint Load - {name}", opacity=0.5, line={'color':clr, 'width':4})) + composite_day = np.average(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=0) + fig.update_layout(legend=dict( + yanchor="top", + y=0.45, + xanchor="left", + x=0.5 + )) + fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=rl_setpoint, name=f"{name} - 12 Hr Avg", opacity=0.5, line={'color':clr, 'width':8})) fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["Summary"]["p_grid_aggregate"], name=f"Agg Load - RL - {name}", line_shape='hv', line={'color':clr})) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_max_loads, name=f"Daily Max Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dot'})) + fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_max_loads, name=f"{name} - Daily Max", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dot'})) fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_min_loads, name=f"Daily Min Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dash'})) fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_range_loads, name=f"Daily Agg Load Range - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr})) + fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.average(daily_range_loads) * np.ones(len(loads)), name=f"Avg Daily Agg Load Range - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr})) fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_avg_loads, name=f"Daily Avg Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dash'})) - # fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.subtract(daily_max_loads, daily_avg_loads), name=f"Daily Max-Avg Agg Load - RL - {name}", line_shape='hv', line={'color':clr, 'dash':'dot'})) 
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_std_loads, name=f"Daily Std Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dashdot'})) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.cumsum(np.divide(data["Summary"]["p_grid_aggregate"],file['parent']['agg_dt'])), name=f"Cumulative Agg Load - RL - {name}", line_shape='hv', visible='legendonly', line={'color':clr, 'dash':'dot'})) - # fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.divide(np.cumsum(data["Summary"]["p_grid_aggregate"][:ts+1]),np.arange(ts)+1), name=f"Avg Load - RL - {name}", line_shape='hv', visible='legendonly')) - # fig = self.plot_mu(fig) - all_daily_stats.add_row([file['name'], np.average(daily_max_loads), max(daily_max_loads), np.average(daily_range_loads)]) + fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.average(daily_std_loads) * np.ones(len(loads)), name=f"Avg Daily Std Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dashdot'})) + fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.cumsum(np.divide(data["Summary"]["p_grid_aggregate"],file['parent']['agg_dt'])), name=f"{name}", line_shape='hv', visible='legendonly', line={'color':clr, })) + all_daily_stats.add_row([file['name'], np.average(daily_max_loads), np.std(daily_max_loads), max(daily_max_loads), np.average(daily_range_loads)]) + else: + self.log.logger.warning("Not enough data collected to have daily stats, try running the aggregator for longer.") print(all_daily_stats) return fig - def plot_baseline_error(self, fig): - for rl_file in self.parametrics: - with open(rl_file['results']) as f: - rldata = json.load(f) - - for file in self.baselines: - with open(file['results']) as f: - data = json.load(f) - - rl2base_conversion = max(1, file['parent']['dt'] // rl_file['parent']['dt']) - base2rl_conversion = max(1, rl_file['parent']['dt'] // file['parent']['dt']) - base_load = np.repeat(data['Summary']['p_grid_aggregate'], base2rl_conversion) - rl_setpoint = np.repeat(rldata['Summary']['p_grid_setpoint'], rl2base_conversion) - rl_load = np.repeat(rldata['Summary']['p_grid_aggregate'], rl2base_conversion) - if rl_setpoint[0] == 10: - rl_setpoint = rl_setpoint*3 - rl_setpoint = rl_setpoint[:len(rl_load)] - rl_error = np.subtract(rl_load, rl_setpoint) - base_error = np.subtract(base_load[:len(rl_setpoint)], rl_setpoint[:len(base_load)]) - rl2base_error = np.subtract(abs(base_error[:len(rl_setpoint)]), abs(rl_error[:len(base_load)]))/max(rl_file['parent']['dt'], file['parent']['dt']) - - if file['parent']['ts'] > rl_file['parent']['ts']: - x_lims = file['parent']['x_lims'] - ts_max = file['parent']['ts'] - dt = file['parent']['dt'] - else: - x_lims = rl_file['parent']['x_lims'] - dt = rl_file['parent']['dt'] - ts_max = rl_file['parent']['ts'] - - ts = len(rl2base_error) - fig.add_trace(go.Scatter(x=x_lims, y=rl2base_error, name=f"RL2Baseline Error - RL{rl_file['name']} and Baseline{file['name']}", visible='legendonly')) - fig.add_trace(go.Scatter(x=x_lims, y=np.divide(np.cumsum(rl2base_error), (np.arange(ts)+1)), name=f"Avg RL2Baseline Error - RL{rl_file['name']} and Baseline{file['name']}", visible='legendonly')) - - fig.add_trace(go.Scatter(x=x_lims, y=base_error, name=f"Baseline Error - RL{rl_file['name']} and Baseline{file['name']}")) - fig.add_trace(go.Scatter(x=x_lims, y=abs(base_error), name=f"Abs Baseline Error - RL{rl_file['name']} and Baseline{file['name']}")) - - hourly_base_error = np.zeros(np.int(np.ceil(len(base_error) / (24*ts_max)) * 
(24*ts_max))) - hourly_base_error[:len(base_error)] = abs(base_error) - hourly_base_error = hourly_base_error.reshape(dt,-1).sum(axis=0) - fig.add_trace(go.Scatter(x=x_lims[::dt], y=hourly_base_error, name=f"Baseline Hourly Error - RL{rl_file['name']} and Baseline{file['name']}")) - fig.add_trace(go.Scatter(x=x_lims[::dt], y=np.cumsum(hourly_base_error), name=f"Cumulative Baseline Hourly Error - RL{rl_file['name']} and Baseline{file['name']}")) - - period = self.config['simulation']['checkpoint_interval'] - period_hours = {"hourly": 1, "daily": 24, "weekly": 7*24} - if not period in period_hours: - if isinstance(period, int): - period_hours[period] = period - num_periods = int(np.ceil(len(hourly_base_error) / period_hours[period])) - periodic_acum_error = np.zeros(num_periods * period_hours[period]) - periodic_acum_error[:len(hourly_base_error)] = hourly_base_error - periodic_acum_error = hourly_base_error.reshape(num_periods, -1) - periodic_acum_error = np.cumsum(periodic_acum_error, axis=1).flatten() - fig.add_trace(go.Scatter(x=file['parent']['x_lims'][::max(rl_file['parent']['dt'], file['parent']['dt'])], y=periodic_acum_error, name=f"Accumulated Baseline Hourly Error - RL{rl_file['name']} and Baseline{file['name']}", visible='legendonly')) - return fig - - def plot_parametric_error(self, fig): - t = PrettyTable(["run name", "std all", "std exc. day 1", "(avg load setpoint - observed load)^2", "recorded reward"]) - for file in self.parametrics: - with open(file['results']) as f: - data = json.load(f) - - name = file['name'] - rl_load = data['Summary']['p_grid_aggregate'] - rl_setpoint = data['Summary']['p_grid_setpoint'] - if 'clipped' in file['name']: - rl_setpoint = np.clip(rl_setpoint, 45, 60) - rl_error = np.subtract(rl_load[:len(rl_setpoint)], rl_setpoint[:len(rl_load)]) / file['parent']['agg_dt'] - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=rl_error, name=f"Error - {name} (kWh)", line_shape='hv', visible='legendonly')) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.divide(np.cumsum(rl_error), np.arange(len(rl_error))+1), name=f"Average Error - {name} (kWh)", line_shape='hv', visible='legendonly')) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=rl_setpoint, name=f"Setpoint - {name} (kW)", line_shape='hv', visible='legendonly')) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=abs(rl_error), name=f"Abs Error - {name} (kWh)", line_shape='hv', visible='legendonly')) - - hourly_rl_error = np.zeros(np.int(np.ceil(len(rl_error) / (24*file['parent']['agg_dt'])) * (24*file['parent']['agg_dt']))) - hourly_rl_error[:len(rl_error)] = abs(rl_error) - hourly_rl_error = hourly_rl_error.reshape(file['parent']['agg_dt'],-1).sum(axis=0) - hourly_rl_error = np.repeat(hourly_rl_error, file['parent']['agg_dt']) - t.add_row([file['name'],np.std(rl_load), np.std(rl_load[24:]), np.sum(np.power(np.subtract(rl_load, rl_setpoint[:len(rl_load)]),2)), np.sum(data['Summary']['rl_rewards'])]) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=hourly_rl_error, name=f"Hourly Error - {name} (kWh)", line_shape='hv', visible='legendonly')) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.cumsum(hourly_rl_error/file['parent']['agg_dt']), name=f"Cumulative Hourly Error - {name} (kWh)", line_shape='hv', visible='legendonly')) - - period = self.config['simulation']['checkpoint_interval'] - period_hours = {"hourly": 1, "daily": 24, "weekly": 7*24} - if not period in period_hours: - if isinstance(period, int): - period_hours[period] = period - num_periods = 
int(np.ceil(len(hourly_rl_error) / (period_hours[period]*file['parent']['agg_dt']))) - periodic_acum_error = np.zeros(num_periods * period_hours[period]) - periodic_acum_error[:len(hourly_rl_error)] = hourly_rl_error[::file['parent']['agg_dt']] - periodic_acum_error = hourly_rl_error.reshape(num_periods, -1) - periodic_acum_error = np.cumsum(periodic_acum_error, axis=1).flatten() - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=periodic_acum_error/file['parent']['agg_dt'], name=f"Accumulated Hourly Error - {name}", visible='legendonly')) - - print(t) - return fig - - def plot_rewards(self, fig): - for file in self.parametrics: - with open(file["q_results"]) as f: - data = json.load(f) - - try: - data = data["horizon"] - except: - data = data["next"] - name = file["name"] - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["average_reward"], name=f"Average Reward - {name}", line_shape='hv', visible='legendonly')) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["cumulative_reward"], name=f"Cumulative Reward - {name}", line_shape='hv', visible='legendonly')) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["reward"], name=f"Reward - {name}", line_shape='hv', visible='legendonly'), secondary_y=True) - return fig - - def just_the_baseline(self, fig): - if len(self.baselines) == 0: - self.ref_log.logger.error("No baseline run files found for analysis.") - fig = self.plot_baseline(fig) - fig.update_layout(title_text="Baseline Summary") - return fig - def rl2baseline(self, fig): - if len(self.parametrics) == 0: - self.ref_log.logger.warning("No parameterized RL aggregator runs found for comparison to baseline.") - fig = self.just_the_baseline(fig) + if len(self.files) == 0: + self.log.logger.warning("No aggregator runs found for analysis.") return fig - # fig = self.plot_greedy(fig) fig = self.plot_baseline(fig) fig = self.plot_parametric(fig) fig.update_layout(title_text="RL Baseline Comparison") - return fig - - def rl2baseline_error(self, fig): - fig = self.plot_baseline_error(fig) - fig = self.plot_parametric_error(fig) - # fig = self.plot_rewards(fig) - fig.update_layout(title_text="RL Baseline Error Metrics") - return fig - - def q_values(self, fig): - with open(rl_q_file) as f: - data = json.load(f) - - x1 = [] - x2 = [] - for i in data["state"]: - if i[0] < 0: - x1.append(i[0]) - else: - x2.append(i[0]) - fig.add_trace(go.Scatter3d(x=x1, y=data["action"], z=data["q_obs"], mode="markers")) - fig.add_trace(go.Scatter3d(x=x2, y=data["action"], z=data["q_obs"], mode="markers")) - return fig - - def rl_qvals(self, fig): - for file in self.parametrics: - with open(file["q_results"]) as f: - data = json.load(f) - - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["q_pred"], name=f"Q predicted - {file['name']}", marker={'opacity':0.2})) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["q_obs"], name=f"Q observed - {file['name']}")) - - fig.update_layout(title_text="Critic Network") - fig.show() - return fig - - def rl_thetas(self, fig): - counter = 1 - for file in self.parametrics: - with open(file["q_results"]) as f: - data = json.load(f) - - data = data["horizon"] - theta = data["theta"] - - for i in range(len(data["theta"][0])): - y = [] - for j in range(file['parent']['ts']): - y.append(theta[j][i]) - fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=y, name=f"Theta_{i}", line_shape='hv', legendgroup=file['name'])) - counter += 1 - fig.update_layout(title_text="Critic Network Coefficients") + fig.update_layout( + 
title="Avg Daily Load Profile", + xaxis_title="Time of Day", + yaxis_title="Agg. Demand (kWh)",) return fig def all_rps(self, fig): - for file in self.parametrics: + for file in self.files: with open(file['results']) as f: data = json.load(f) @@ -765,7 +497,7 @@ def all_rps(self, fig): data = json.load(f) data = data["horizon"] mu = np.array(data["mu"]) - std = self.config['rl']['parameters']['exploration_rate'][0] + std = self.config['agg']['parameters']['exploration_rate'][0] delta = np.subtract(mu, rps) fig.add_trace(go.Histogram(x=delta, name=f"{file['name']}"), row=2, col=1) diff --git a/requirements.txt b/requirements.txt index 8996497..f2d91a3 100755 --- a/requirements.txt +++ b/requirements.txt @@ -44,3 +44,4 @@ Werkzeug xlrd zipp pathos +prettytable