Skip to content

Commit

Permalink
Shadow Pricing Enhancements (#7)
Browse files Browse the repository at this point in the history
* updated scripts to include simulation-based shadow pricing

* blacken

* Updated shadow_pricing.yaml for mtc example

* code cleanup

* more cleanup

* documentation and passing tests

* passing tests

* passing tests

* updated doc on shadow pricing

* 2nd Update model doc on shadow pricing

* more doc update on shadow pricing

* fixing pandas future warning

* blacken

* bug in trying to access shadow price settings when not running shadow pricing

* limiting pandas version

* always updating choices

* testing removal of lognormal for hh vot

* putting hh vot back in

* updating to match sharrow test versions

* raw person table for buffer instead of injectable

* adding segmentation, output by iteration, and external worker removal

* formatting & documentation

Co-authored-by: aletzdy <58451076+aletzdy@users.noreply.github.com>
  • Loading branch information
dhensle and aletzdy authored Oct 28, 2022
1 parent ee6f07d commit 630f1cb
Show file tree
Hide file tree
Showing 11 changed files with 821 additions and 90 deletions.
82 changes: 76 additions & 6 deletions activitysim/abm/models/location_choice.py
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,7 @@ def location_sample(
DEST_MAZ = "dest_MAZ"


def aggregate_size_terms(dest_size_terms, network_los):
def aggregate_size_terms(dest_size_terms, network_los, model_settings):
#
# aggregate MAZ_size_terms to TAZ_size_terms
#
Expand Down Expand Up @@ -261,6 +261,21 @@ def aggregate_size_terms(dest_size_terms, network_los):
for c in weighted_average_cols:
TAZ_size_terms[c] /= TAZ_size_terms["size_term"] # weighted average

spc = shadow_pricing.load_shadow_price_calculator(model_settings)
if spc.use_shadow_pricing and (
spc.shadow_settings["SHADOW_PRICE_METHOD"] == "simulation"
):
        # allow TAZs with at least one underassigned MAZ in them, therefore with a shadow price larger than -999, to be selected again
TAZ_size_terms["shadow_price_utility_adjustment"] = np.where(
TAZ_size_terms["shadow_price_utility_adjustment"] > -999, 0, -999
)
# now, negative size term means shadow price is -999. Setting size_term to 0 so the prob of that MAZ being selected becomes 0
MAZ_size_terms["size_term"] = np.where(
MAZ_size_terms["shadow_price_utility_adjustment"] < 0,
0,
MAZ_size_terms["size_term"],
)

if TAZ_size_terms.isna().any(axis=None):
logger.warning(
f"TAZ_size_terms with NAN values\n{TAZ_size_terms[TAZ_size_terms.isna().any(axis=1)]}"
Expand Down Expand Up @@ -308,7 +323,9 @@ def location_presample(
alt_dest_col_name = model_settings["ALT_DEST_COL_NAME"]
assert DEST_TAZ != alt_dest_col_name

MAZ_size_terms, TAZ_size_terms = aggregate_size_terms(dest_size_terms, network_los)
MAZ_size_terms, TAZ_size_terms = aggregate_size_terms(
dest_size_terms, network_los, model_settings
)

# convert MAZ zone_id to 'TAZ' in choosers (persons_merged)
# persons_merged[HOME_TAZ] = persons_merged[HOME_MAZ].map(maz_to_taz)
Expand Down Expand Up @@ -860,6 +877,7 @@ def iterate_location_choice(

    # chooser segmentation allows different sets of coefficients for e.g. different income_segments or tour_types
chooser_segment_column = model_settings["CHOOSER_SEGMENT_COLUMN_NAME"]
segment_ids = model_settings["SEGMENT_IDS"]

assert (
chooser_segment_column in persons_merged_df
Expand All @@ -873,11 +891,38 @@ def iterate_location_choice(

for iteration in range(1, max_iterations + 1):

persons_merged_df_ = persons_merged_df.copy()

if spc.use_shadow_pricing and iteration > 1:
spc.update_shadow_prices()

choices_df, save_sample_df = run_location_choice(
persons_merged_df,
if spc.shadow_settings["SHADOW_PRICE_METHOD"] == "simulation":
# filter from the sampled persons
persons_merged_df_ = persons_merged_df_[
persons_merged_df_.index.isin(spc.sampled_persons.index)
]
# handle cases where a segment has persons but no zones to receive them
desired_size_sum = spc.desired_size[
spc.desired_size.index.isin(
spc.shadow_prices[spc.shadow_prices.iloc[:, 0] != -999].index
)
].sum()
zero_desired_size_segments = [
i for i in desired_size_sum.index if desired_size_sum[i] == 0
]
zero_desired_size_segments_ids = [
segment_ids[key] for key in zero_desired_size_segments
]
persons_merged_df_ = persons_merged_df_[
~persons_merged_df_[chooser_segment_column].isin(
zero_desired_size_segments_ids
)
]

persons_merged_df_ = persons_merged_df_.sort_index()

choices_df_, save_sample_df = run_location_choice(
persons_merged_df_,
network_los,
shadow_price_calculator=spc,
want_logsums=logsum_column_name is not None,
Expand All @@ -890,10 +935,35 @@ def iterate_location_choice(
trace_label=tracing.extend_trace_label(trace_label, "i%s" % iteration),
)

# choices_df is a pandas DataFrame with columns 'choice' and (optionally) 'logsum'
if choices_df is None:
# choices_df is a pandas DataFrame with columns "choice" and (optionally) "logsum"
if choices_df_ is None:
break

if spc.use_shadow_pricing:
# handle simulation method
if (
spc.shadow_settings["SHADOW_PRICE_METHOD"] == "simulation"
and iteration > 1
):
                # if a process ends up with no sampled workers in it, hence an empty choices_df_, then choices_df will be what it was previously
if len(choices_df_) == 0:
choices_df = choices_df
else:
choices_df = pd.concat([choices_df, choices_df_], axis=0)
choices_df_index = choices_df_.index.name
choices_df = choices_df.reset_index()
# update choices of workers/students
choices_df = choices_df.drop_duplicates(
subset=[choices_df_index], keep="last"
)
choices_df = choices_df.set_index(choices_df_index)
choices_df = choices_df.sort_index()
else:
choices_df = choices_df_.copy()

else:
choices_df = choices_df_

spc.set_choices(
choices=choices_df["choice"],
segment_ids=persons_merged_df[chooser_segment_column].reindex(
Expand Down
4 changes: 1 addition & 3 deletions activitysim/abm/models/trip_purpose.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,9 +74,7 @@ def choose_intermediate_trip_purpose(

# probs should sum to 1 across rows
sum_probs = probs_spec[purpose_cols].sum(axis=1)
probs_spec.loc[:, purpose_cols] = probs_spec.loc[:, purpose_cols].div(
sum_probs, axis=0
)
probs_spec[purpose_cols] = probs_spec[purpose_cols].div(sum_probs, axis=0)

# left join trips to probs (there may be multiple rows per trip for multiple depart ranges)
choosers = pd.merge(
Expand Down
Loading

0 comments on commit 630f1cb

Please sign in to comment.