Skip to content

Commit

Permalink
Adds two QAQC scripts to tools directory
Browse files Browse the repository at this point in the history
This merge adds two new scripts into /tools/ for use in QAQC.

Adds inundate_nation.py to produce inundation maps for the entire country for use in QAQC.
Adds check_deep_flooding.py to check for depths of inundation greater than a user-supplied threshold at specific areas defined by a user-supplied shapefile.

This resolves #439 and resolves #438.
  • Loading branch information
BradfordBates-NOAA authored Aug 11, 2021
1 parent 493dc27 commit f67a35d
Show file tree
Hide file tree
Showing 4 changed files with 226 additions and 5 deletions.
10 changes: 10 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,16 @@ All notable changes to this project will be documented in this file.
We follow the [Semantic Versioning 2.0.0](http://semver.org/) format.
<br/><br/>

## v3.0.20.0 - 2021-08-11 - [PR #440](https://github.com/NOAA-OWP/cahaba/pull/440)

This merge adds two new scripts into `/tools/` for use in QAQC.

### Additions
- `inundate_nation.py` to produce inundation maps for the entire country for use in QAQC.
- `check_deep_flooding.py` to check for depths of inundation greater than a user-supplied threshold at specific areas defined by a user-supplied shapefile.

<br/><br/>

## v3.0.19.5 - 2021-07-19

Updating `README.md`.
Expand Down
110 changes: 110 additions & 0 deletions tools/check_deep_flooding.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
import argparse
import os
from multiprocessing import Pool
import numpy as np
import rasterio.shutil
from rasterio.warp import calculate_default_transform, reproject, Resampling
import rasterio.crs
import rasterio
import rasterio.mask
import geopandas as gpd
from shapely.geometry import box


def check_deep_flooding(args):
    """Flag deep flooding in one depth grid within user-supplied polygon areas.

    Keeps only cells that are inside the shapefile polygons AND deeper than
    the threshold; all other cells become nodata (-1). If any such cell
    exists, writes the result as "checked_<threshold>_<name>" in output_dir;
    otherwise writes nothing.

    Parameters (packed in a list so the function works with Pool.map):
        args[0] (str): path to a depth grid raster produced by inundation.py.
        args[1] (str): path to the polygon shapefile defining check areas.
        args[2] (int or float): depth threshold in meters.
        args[3] (str): directory in which to write the output raster.
    """

    depth_grid_path = args[0]
    shapefile_path = args[1]
    depth_threshold = args[2]
    output_dir = args[3]

    print("Checking " + depth_grid_path + "...")

    # Use a context manager so the dataset is closed even on error
    # (the original left the file handle open for the life of the worker).
    with rasterio.open(depth_grid_path) as depth_src:
        depth_array = depth_src.read(1)
        profile = depth_src.profile

        # Read only features intersecting the raster extent. CRS mismatches are
        # handled if bbox is passed a GeoDataFrame (which it is).
        bounding_box = gpd.GeoDataFrame({'geometry': box(*depth_src.bounds)}, index=[0], crs=depth_src.crs)
        poly_all = gpd.read_file(shapefile_path, bbox=bounding_box)

        # No features overlap this raster -> nothing to check.
        if poly_all.empty:
            return

        # Project features to the raster CRS. (to_crs preserves row count, so a
        # second emptiness check here would be dead code and is omitted.)
        poly_all_proj = poly_all.to_crs(depth_src.crs)

        # Mask of the raster: True inside polygons, False outside
        # (invert=True gives in-polygon cells the True value).
        in_poly, _, _ = rasterio.mask.raster_geometry_mask(depth_src, poly_all_proj.geometry, invert=True)

    # 1 inside polygons, 0 outside.
    poly_mask = np.where(in_poly, 1, 0)

    # Keep only depths above the threshold; everything else becomes nodata (-1).
    filtered_depth_array = np.where(depth_array > depth_threshold, depth_array, -1)

    # Keep only cells inside the polygon areas.
    masked_depth_array = np.where(poly_mask == 1, filtered_depth_array, -1)

    # Write an output raster only if at least one deep-flooding cell survived.
    if np.amax(masked_depth_array) > 0:

        file_handle = os.path.split(depth_grid_path)[1]

        checked_depth_raster = os.path.join(output_dir, "checked_" + str(depth_threshold) + "_" + file_handle)

        print("Writing " + checked_depth_raster + "...")
        # Reuse the source profile but mark -1 as nodata for downstream tools.
        with rasterio.Env():
            profile.update(nodata=-1)
            with rasterio.open(checked_depth_raster, 'w', **profile) as dst:
                dst.write(masked_depth_array, 1)


if __name__ == '__main__':

    # Parse arguments.
    parser = argparse.ArgumentParser(description='Checks for deep flooding in a specified shapefile. Requires a directory of depth grids and a shapefile.')
    parser.add_argument('-d','--depth-grid-dir',help='Name of directory containing the depth grid outputs of inundation.py.',required=True)
    parser.add_argument('-s','--shapefile-path',help='Path to shapefile to be used as the overlay.',required=True)
    parser.add_argument('-t','--depth-threshold',help='Depth in meters to use as checking threshold.',required=True)
    parser.add_argument('-o', '--output-dir',help='The path to a directory to write the outputs.',required=True)
    parser.add_argument('-j', '--job-number',help='The number of parallel jobs to run.',required=False,default=1)

    args = vars(parser.parse_args())

    depth_grid_dir = args['depth_grid_dir']
    shapefile_path = args['shapefile_path']
    output_dir = args['output_dir']
    job_number = int(args['job_number'])

    # Accept fractional thresholds (depths are in meters), but keep whole
    # numbers as ints so the "checked_<threshold>_" output filename is
    # unchanged for integer input (the original only supported int()).
    depth_threshold = float(args['depth_threshold'])
    if depth_threshold.is_integer():
        depth_threshold = int(depth_threshold)

    # makedirs handles nested paths and is a no-op when the directory exists
    # (os.mkdir would fail on either case).
    os.makedirs(output_dir, exist_ok=True)

    # Build one worker-argument list per depth grid found in the directory.
    procs_list = []
    for f in os.listdir(depth_grid_dir):
        if 'depth' in f:
            full_f_path = os.path.join(depth_grid_dir, f)
            procs_list.append([full_f_path, shapefile_path, depth_threshold, output_dir])

    # Check each depth grid across multiple processes.
    with Pool(processes=job_number) as pool:
        pool.map(check_deep_flooding, procs_list)




97 changes: 97 additions & 0 deletions tools/inundate_nation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
import argparse
import os

from inundation import inundate
from multiprocessing import Pool

# Default root for QAQC inundation-review outputs. Will update.
INUN_REVIEW_DIR = r'/data/inundation_review/inundation_nwm_recurr/' # Will update.
# Root of static FIM inputs (WBD geopackage lives under here).
INPUTS_DIR = r'/data/inputs'


def run_inundation(args):
    """Run inundate() for one HUC8 at one NWM recurrence-interval magnitude.

    Produces a depth raster in magnitude_output_dir; skips the run when that
    raster already exists. Only depths are produced (inundation_raster and
    inundation_polygon are deliberately passed as None).

    Parameters (packed in a list so the function works with Pool.map):
        args[0] (str): fim_run_dir, directory of fim_run.sh outputs.
        args[1] (str): huc, HUC8 code to process (subdirectory of fim_run_dir).
        args[2] (str): magnitude, e.g. '1_5', selects the forecast CSV.
        args[3] (str): magnitude_output_dir, where the depth raster is written.
        args[4] (str): config, 'ms' or 'fr', used in the output filename.
    """

    fim_run_dir = args[0]
    huc = args[1]
    magnitude = args[2]
    magnitude_output_dir = args[3]
    config = args[4]

    fim_run_parent = os.path.join(fim_run_dir, huc)
    rem = os.path.join(fim_run_parent, 'rem_zeroed_masked.tif')
    catchments = os.path.join(fim_run_parent, 'gw_catchments_reaches_filtered_addedAttributes.tif')
    mask_type = 'huc'
    # (The original assigned catchment_poly = '' first; that assignment was dead.)
    catchment_poly = os.path.join(fim_run_parent, 'gw_catchments_reaches_filtered_addedAttributes_crosswalked.gpkg')
    hydro_table = os.path.join(fim_run_parent, 'hydroTable.csv')
    depth_raster = os.path.join(magnitude_output_dir, magnitude + '_' + config + '_inund_depth.tif')
    forecast = os.path.join(INUN_REVIEW_DIR, 'nwm_recurr_flow_data', 'recurr_' + magnitude + '_cms.csv')
    hucs, hucs_layerName = os.path.join(INPUTS_DIR, 'wbd', 'WBD_National.gpkg'), 'WBDHU8'

    print(depth_raster)

    if not os.path.exists(depth_raster):
        print("Running the NWM recurrence intervals for HUC: " + huc + ", " + magnitude + "...")
        inundate(
            rem,catchments,catchment_poly,hydro_table,forecast,mask_type,hucs=hucs,hucs_layerName=hucs_layerName,
            subset_hucs=huc,num_workers=1,aggregate=False,inundation_raster=None,inundation_polygon=None,
            depths=depth_raster,out_raster_profile=None,out_vector_profile=None,quiet=True
        )
    else:
        # Bug fix: the existence check is on depth_raster, so report that path
        # (the original printed an inundation_raster path that is never produced).
        print(depth_raster + " exists. Moving on...")


if __name__ == '__main__':

    # Parse arguments.
    parser = argparse.ArgumentParser(description='Inundation mapping for FOSS FIM using streamflow recurrence interval flow data. Inundation outputs are stored in the /inundation_review/inundation_nwm_recurr/ directory.')
    parser.add_argument('-r','--fim-run-dir',help='Name of directory containing outputs of fim_run.sh (e.g. data/outputs/dev_abc/12345678_dev_test)',required=True)
    parser.add_argument('-o', '--output-dir',help='The path to a directory to write the outputs. If not used, the inundation_review directory is used by default.',required=False, default="")
    parser.add_argument('-j', '--job-number',help='The number of parallel jobs to run.',required=False,default=1)

    args = vars(parser.parse_args())

    fim_run_dir = args['fim_run_dir']
    output_dir = args['output_dir']
    job_number = int(args['job_number'])

    # NWM recurrence-interval magnitudes to map; extend as more forecast CSVs
    # become available under INUN_REVIEW_DIR/nwm_recurr_flow_data.
    magnitude_list = ['1_5']

    huc_list = os.listdir(fim_run_dir)

    print(fim_run_dir)
    # The FIM version is the last path component of the run directory; it
    # selects the default output location and the ms/fr configuration.
    fim_version = os.path.split(fim_run_dir)[1]
    print(fim_version)

    print(output_dir)
    if output_dir == "":
        output_dir = os.path.join(INUN_REVIEW_DIR, fim_version)
    print(output_dir)

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Determine configuration from the version name ('fr' wins if both tokens
    # are present, preserving the original fall-through order). Fail fast with
    # a clear message instead of a NameError further down.
    config = None
    if 'ms' in fim_version:
        config = 'ms'
    if 'fr' in fim_version:
        config = 'fr'
    if config is None:
        raise ValueError("fim_run_dir name must contain 'ms' or 'fr': " + fim_version)

    procs_list = []

    for huc in huc_list:
        if huc != 'logs':  # skip the fim_run.sh log directory
            for magnitude in magnitude_list:
                magnitude_output_dir = os.path.join(output_dir, magnitude + '_' + config)
                if not os.path.exists(magnitude_output_dir):
                    os.mkdir(magnitude_output_dir)
                print(magnitude_output_dir)
                procs_list.append([fim_run_dir, huc, magnitude, magnitude_output_dir, config])

    # Run in parallel when more than one job is requested; otherwise run
    # serially (the original silently did nothing when job_number == 1).
    if job_number > 1:
        with Pool(processes=job_number) as pool:
            pool.map(run_inundation, procs_list)
    else:
        for proc_args in procs_list:
            run_inundation(proc_args)

14 changes: 9 additions & 5 deletions tools/inundation_wrapper_nwm_flows.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
import shutil
from inundation import inundate

TEST_CASES_DIR = r'/data/inundation_review/inundation_nwm_recurr/' # Will update.
INUN_REVIEW_DIR = r'/data/inundation_review/inundation_nwm_recurr/' # Will update.
INPUTS_DIR = r'/data/inputs'
OUTPUTS_DIR = os.environ['outputDataDir']

Expand All @@ -24,13 +24,17 @@
WHITE_BOLD = '\033[37;1m'
CYAN_BOLD = '\033[36;1m'

def run_recurr_test(fim_run_dir, branch_name, huc_id, magnitude, mask_type='huc'):
def run_recurr_test(fim_run_dir, branch_name, huc_id, magnitude, mask_type='huc', output_dir=None):

# Construct paths to development test results if not existent.
huc_id_dir_parent = os.path.join(TEST_CASES_DIR, huc_id)
huc_id_dir_parent = os.path.join(INUN_REVIEW_DIR, huc_id)
if not os.path.exists(huc_id_dir_parent):
os.mkdir(huc_id_dir_parent)
branch_test_case_dir_parent = os.path.join(TEST_CASES_DIR, huc_id, branch_name)

if output_dir == None:
branch_test_case_dir_parent = os.path.join(INUN_REVIEW_DIR, huc_id, branch_name)
else:
branch_test_case_dir_parent = os.path.join(output_dir, huc_id, branch_name)

# Delete the entire directory if it already exists.
if os.path.exists(branch_test_case_dir_parent):
Expand Down Expand Up @@ -77,7 +81,7 @@ def run_recurr_test(fim_run_dir, branch_name, huc_id, magnitude, mask_type='huc'

# Define paths to inundation_raster and forecast file.
inundation_raster = os.path.join(branch_test_case_dir, branch_name + '_inund_extent.tif')
forecast = os.path.join(TEST_CASES_DIR, 'nwm_recurr_flow_data', 'recurr_' + magnitude + '_cms.csv')
forecast = os.path.join(INUN_REVIEW_DIR, 'nwm_recurr_flow_data', 'recurr_' + magnitude + '_cms.csv')

# Run inundate.
print("-----> Running inundate() to produce modeled inundation extent for the " + magnitude + " magnitude...")
Expand Down

0 comments on commit f67a35d

Please sign in to comment.