Page MenuHomec4science

Scenarios_hourly.py
No OneTemporary

File Metadata

Created
Fri, May 17, 00:31

Scenarios_hourly.py

#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import xarray as xr
import os
import time
import sys
### INPUTS
# Target year for the hourly PV simulation, passed on the command line
# (e.g. `python Scenarios_hourly.py 2015`).
YEAR = int(sys.argv[1])
### FILE PATHS
# Output directory for the 2050 scenario aggregations written by this script.
SCENARIO_FP = '/work/hyenergy/output/solar_potential/scenarios_2050'
# Annual roof-level potential CSVs (baseline and flat-roof East-West variant).
PV_FP = '/work/hyenergy/output/solar_potential/hourly_potential/'
# Hourly netCDF outputs: EW-configuration run and baseline run.
hourly_FP_EW = '/scratch/walch/workdir_solar_EW_scenarios'
hourly_FP = '/scratch/walch/workdir_solar_hourly'
# # Load data
# Load base scenario and EW roofs
# Both tables are indexed by roof ID (DF_UID, index_col=0); roofs_EW covers
# only the flat roofs re-simulated with East-West panel orientation.
roofs_EW = pd.read_csv(os.path.join(PV_FP, '%d_annual_roofs_flat_EW.csv' %(YEAR)), index_col = 0)
roofs = pd.read_csv(os.path.join(PV_FP, '%d_annual_roofs.csv' %(YEAR)), index_col = 0)
# Create annual dataset of full EW scenario
# Overwrite the baseline rows of the flat roofs with their EW counterparts.
annual_EW = roofs.copy()
annual_EW.loc[ roofs_EW.index ] = roofs_EW.copy()
# Load hourly data
# Lazily open all per-chunk netCDF files as one dataset; chunking by DF_UID
# keeps memory bounded when summing over roofs with dask.
PV_hourly_EW = xr.open_mfdataset(os.path.join( hourly_FP_EW, 'tmp_%d' %YEAR, 'pv_potential_*.nc'),
chunks = {'DF_UID' : 10000})
PV_hourly_base = xr.open_mfdataset(os.path.join( hourly_FP, 'tmp_%d' %YEAR, 'pv_potential_*.nc'),
chunks = {'DF_UID' : 10000})
# Load scenario file
# Cumulative annual potential (TWh) per roof, sorted for scenario cut-offs;
# indexed by DF_UID.
scenarios = pd.read_csv(os.path.join( SCENARIO_FP, 'MMH_cumsum_for_scenarios.csv' ), index_col = 0)
# # Loop over communes (scenario: all roofs)
#
# Full-deployment scenario: every roof with at least MIN_AREA m^2 of available
# area is equipped. For each commune (BFS_NUMMER), the hourly potential is the
# baseline sum, minus the baseline contribution of flat roofs, plus the same
# flat roofs in East-West configuration.
# SCENARIO DEFINITION
MIN_AREA = 8  # minimum available roof area (m^2) for a roof to be suitable
# no restriction for roof tilt and roof aspect
commune_pot_EW = {}
# Hoisted out of the loop: the commune list is invariant and unique() is
# recomputed needlessly in the original progress print.
communes = roofs.BFS_NUMMER.unique()
tt = time.time()
for i, commune in enumerate(communes):
    # Baseline scenario: all suitable roofs of this commune
    DF_UIDs = roofs[(roofs.BFS_NUMMER == commune) & (roofs.available_area >= MIN_AREA)].index
    commune_pot_base = PV_hourly_base.sel(DF_UID = DF_UIDs).pv_potential.sum(dim = 'DF_UID').to_series()
    # Baseline scenario (flat roofs only): the subset of DF_UIDs that have an EW variant
    DF_UIDs_flat = roofs_EW[roofs_EW.index.isin( DF_UIDs )].index
    commune_pot_flat_base = PV_hourly_base.sel(DF_UID = DF_UIDs_flat).pv_potential.sum(dim = 'DF_UID').to_series()
    # EW scenario (flat roofs only)
    DF_UIDs_flat_EW = roofs_EW[(roofs_EW.BFS_NUMMER == commune) & (roofs_EW.available_area >= MIN_AREA)].index
    commune_pot_flat_EW = PV_hourly_EW.sel(DF_UID = DF_UIDs_flat_EW).pv_potential.sum(dim = 'DF_UID').to_series()
    # Swap the flat roofs' baseline contribution for their EW contribution
    commune_pot_EW[commune] = commune_pot_base - commune_pot_flat_base + commune_pot_flat_EW
    if ((i+1) % 50) == 0:
        print('Processed commune %d of %d in %.2fs' %((i+1), len( communes ), time.time()-tt))
        tt = time.time()
# Merge all commune potentials (one column per commune, hourly index)
commune_pot_EW = pd.DataFrame(commune_pot_EW)
# # Save output data
outfile = ( os.path.join( SCENARIO_FP, '%d_PV_communes_all.csv' %YEAR) )
# Convert to GWh before saving — assumes pv_potential is in kWh; TODO confirm
(commune_pot_EW/1e6).to_csv( outfile )
# # Scenarios:
def aggregate_scenario_to_communes(roofs_base, roofs_flat_EW, PV_hourly_base, PV_hourly_EW, DF_UID_scenario,
                                   MIN_AREA = 8):
    """Aggregate hourly PV potential per commune for a restricted roof scenario.

    Select all roofs in DF_UID_scenario, with the EW option used for flat roofs
    (all roofs < 10 deg are considered flat). All roofs in DF_UID_scenario are
    considered suitable except those with an available area < MIN_AREA.

    IMPORTANT: roofs_base and roofs_flat_EW must have the DF_UID as index,
    and carry BFS_NUMMER (commune ID) and available_area as columns.

    Parameters
    ----------
    roofs_base : pd.DataFrame
        Baseline annual roof table, indexed by DF_UID.
    roofs_flat_EW : pd.DataFrame
        Flat-roof East-West variant table, indexed by DF_UID.
    PV_hourly_base, PV_hourly_EW : xr.Dataset
        Hourly potentials with a `pv_potential` variable along dim `DF_UID`.
    DF_UID_scenario : array-like
        Roof IDs included in the scenario.
    MIN_AREA : float, default 8
        Minimum available roof area (m^2) for a roof to count as suitable.

    Returns
    -------
    pd.DataFrame
        One column per commune, hourly index; values divided by 1e6
        (unit conversion — presumably kWh -> GWh; verify against caller).
    """
    commune_pot_EW = {}

    # Named def instead of an assigned lambda (PEP 8 E731): suitable roofs of
    # one commune, restricted to the scenario's roof IDs.
    def suitable_DF_UIDs_commune(roofs, communeID):
        return roofs[(roofs.BFS_NUMMER == communeID)
                     & (roofs.available_area >= MIN_AREA)
                     & (roofs.index.isin( DF_UID_scenario ))].index

    commune_IDs = roofs_base.BFS_NUMMER.unique()
    tt = time.time()
    for i, commune in enumerate(commune_IDs):
        # Baseline scenario
        DF_UIDs = suitable_DF_UIDs_commune( roofs_base, commune )
        commune_pot_base = PV_hourly_base.sel(DF_UID = DF_UIDs).pv_potential.sum(dim = 'DF_UID').to_series()
        # Baseline scenario (flat roofs only)
        DF_UIDs_flat = roofs_flat_EW[ roofs_flat_EW.index.isin( DF_UIDs ) ].index
        commune_pot_flat_base = PV_hourly_base.sel(DF_UID = DF_UIDs_flat).pv_potential.sum(dim = 'DF_UID').to_series()
        # EW scenario (flat roofs only)
        DF_UIDs_flat_EW = suitable_DF_UIDs_commune( roofs_flat_EW, commune )
        commune_pot_flat_EW = PV_hourly_EW.sel( DF_UID = DF_UIDs_flat_EW).pv_potential.sum(dim = 'DF_UID').to_series()
        # Replace the flat roofs' baseline contribution by their EW contribution
        commune_pot_EW[commune] = commune_pot_base - commune_pot_flat_base + commune_pot_flat_EW
        if ((i+1) % 20) == 0:
            print('Processed commune %d of %d in %.2fs' %((i+1), len( commune_IDs ), time.time()-tt))
            tt = time.time()
    # Merge all commune potentials
    return pd.DataFrame(commune_pot_EW) / 1e6
# Deployment-target scenarios: include roofs in order of the precomputed
# cumulative annual potential until target_PV TWh/yr is reached, then
# aggregate their hourly potential per commune and save one CSV per target.
for target_PV in [15, 25, 35]:
    tt = time.time()
    # Roofs whose cumulative potential lies below the target cut-off
    scenario_UIDs = scenarios[scenarios.EPV_cumsum_TWh <= target_PV].index.values
    commune_PV_scenario = aggregate_scenario_to_communes( roofs, roofs_EW,
                                                          PV_hourly_base, PV_hourly_EW, scenario_UIDs )
    outfile = ( os.path.join( SCENARIO_FP, '%d_PV_communes_%d_TWh.csv' %(YEAR, target_PV) ) )
    commune_PV_scenario.to_csv( outfile )
    print('Computed and saved %s in %.2fs' %(outfile, time.time()-tt))

Event Timeline