Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Revised shadow pricing mechanism #613

Merged
merged 30 commits into from
Dec 21, 2022
Merged
Show file tree
Hide file tree
Changes from 25 commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
b93e6fe
updated scripts to include simulation-based shadow pricing
aletzdy Sep 30, 2022
11600c3
blacken
dhensle Sep 30, 2022
4b9f55e
Updated shadow_pricing.yaml for mtc example
aletzdy Sep 30, 2022
29f893e
Merge branch 'develop_RevisedShadowPricing' of https://github.com/ale…
dhensle Sep 30, 2022
846d3be
code cleanup
dhensle Sep 30, 2022
e2dde11
more cleanup
dhensle Sep 30, 2022
3863396
documentation and passing tests
dhensle Sep 30, 2022
3d1edc7
passing tests
dhensle Sep 30, 2022
c4dc4ea
passing tests
dhensle Oct 1, 2022
23df7b9
updated doc on shadow pricing
aletzdy Oct 3, 2022
4269dd5
2nd Update model doc on shadow pricing
aletzdy Oct 3, 2022
3a87acd
more doc update on shadow pricing
aletzdy Oct 3, 2022
2db5f59
fixing pandas future warning
dhensle Oct 3, 2022
b5768e3
blacken
dhensle Oct 3, 2022
30d67bc
bug in trying to access shadow price settings when not running shadow…
dhensle Oct 3, 2022
a20a396
limiting pandas version
dhensle Oct 3, 2022
4d58a85
always updating choices
dhensle Oct 3, 2022
7c706f7
testing removal of lognormal for hh vot
dhensle Oct 3, 2022
e2e0d9d
putting hh vot back in
dhensle Oct 3, 2022
95d7fd7
updating to match sharrow test versions
dhensle Oct 4, 2022
1ce6d36
raw person table for buffer instead of injectable
dhensle Oct 7, 2022
7167b65
adding segmentation, output by iteration, and external worker removal
dhensle Oct 25, 2022
bce64bd
formatting & documentation
dhensle Oct 27, 2022
ebd2bc8
ensuring TAZ is not selected if no available MAZ
dhensle Nov 4, 2022
aa8d5a3
adding logic to skip external location choice models
dhensle Nov 4, 2022
a79ac7b
consistent multiprocessing results
dhensle Dec 15, 2022
9190535
Merge branch 'develop' into develop_RevisedShadowPricing
dhensle Dec 15, 2022
0d90f87
blacken
dhensle Dec 15, 2022
0898686
Merge branch 'develop_RevisedShadowPricing' of https://github.com/ale…
dhensle Dec 15, 2022
3a78433
updating regression trips
dhensle Dec 15, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
92 changes: 86 additions & 6 deletions activitysim/abm/models/location_choice.py
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,7 @@ def location_sample(
DEST_MAZ = "dest_MAZ"


def aggregate_size_terms(dest_size_terms, network_los):
def aggregate_size_terms(dest_size_terms, network_los, model_settings):
#
# aggregate MAZ_size_terms to TAZ_size_terms
#
Expand All @@ -243,6 +243,13 @@ def aggregate_size_terms(dest_size_terms, network_los):
)
MAZ_size_terms[DEST_TAZ] = MAZ_size_terms.index.map(maz_to_taz)

MAZ_size_terms["avail_MAZ"] = np.where(
(MAZ_size_terms.size_term > 0)
& (MAZ_size_terms.shadow_price_utility_adjustment > -999),
1,
0,
)

weighted_average_cols = [
"shadow_price_size_term_adjustment",
"shadow_price_utility_adjustment",
Expand All @@ -261,6 +268,24 @@ def aggregate_size_terms(dest_size_terms, network_los):
for c in weighted_average_cols:
TAZ_size_terms[c] /= TAZ_size_terms["size_term"] # weighted average

spc = shadow_pricing.load_shadow_price_calculator(model_settings)
if spc.use_shadow_pricing and (
spc.shadow_settings["SHADOW_PRICE_METHOD"] == "simulation"
):
# allow TAZs with at least one underassigned MAZ in them, therefore with a shadow price larger than -999, to be selected again
TAZ_size_terms["shadow_price_utility_adjustment"] = np.where(
(TAZ_size_terms["shadow_price_utility_adjustment"] > -999)
& (TAZ_size_terms["avail_MAZ"] > 0),
0,
-999,
)
# now, negative size term means shadow price is -999. Setting size_term to 0 so the prob of that MAZ being selected becomes 0
MAZ_size_terms["size_term"] = np.where(
MAZ_size_terms["shadow_price_utility_adjustment"] < 0,
0,
MAZ_size_terms["size_term"],
)

if TAZ_size_terms.isna().any(axis=None):
logger.warning(
f"TAZ_size_terms with NAN values\n{TAZ_size_terms[TAZ_size_terms.isna().any(axis=1)]}"
Expand Down Expand Up @@ -308,7 +333,9 @@ def location_presample(
alt_dest_col_name = model_settings["ALT_DEST_COL_NAME"]
assert DEST_TAZ != alt_dest_col_name

MAZ_size_terms, TAZ_size_terms = aggregate_size_terms(dest_size_terms, network_los)
MAZ_size_terms, TAZ_size_terms = aggregate_size_terms(
dest_size_terms, network_los, model_settings
)

# convert MAZ zone_id to 'TAZ' in choosers (persons_merged)
# persons_merged[HOME_TAZ] = persons_merged[HOME_MAZ].map(maz_to_taz)
Expand Down Expand Up @@ -856,6 +883,7 @@ def iterate_location_choice(

# chooser segmentation allows different sets coefficients for e.g. different income_segments or tour_types
chooser_segment_column = model_settings["CHOOSER_SEGMENT_COLUMN_NAME"]
segment_ids = model_settings["SEGMENT_IDS"]

assert (
chooser_segment_column in persons_merged_df
Expand All @@ -869,11 +897,38 @@ def iterate_location_choice(

for iteration in range(1, max_iterations + 1):

persons_merged_df_ = persons_merged_df.copy()

if spc.use_shadow_pricing and iteration > 1:
spc.update_shadow_prices()

choices_df, save_sample_df = run_location_choice(
persons_merged_df,
if spc.shadow_settings["SHADOW_PRICE_METHOD"] == "simulation":
# filter from the sampled persons
persons_merged_df_ = persons_merged_df_[
persons_merged_df_.index.isin(spc.sampled_persons.index)
]
# handle cases where a segment has persons but no zones to receive them
desired_size_sum = spc.desired_size[
spc.desired_size.index.isin(
spc.shadow_prices[spc.shadow_prices.iloc[:, 0] != -999].index
)
].sum()
zero_desired_size_segments = [
i for i in desired_size_sum.index if desired_size_sum[i] == 0
]
zero_desired_size_segments_ids = [
segment_ids[key] for key in zero_desired_size_segments
]
persons_merged_df_ = persons_merged_df_[
~persons_merged_df_[chooser_segment_column].isin(
zero_desired_size_segments_ids
)
]

persons_merged_df_ = persons_merged_df_.sort_index()

choices_df_, save_sample_df = run_location_choice(
persons_merged_df_,
network_los,
shadow_price_calculator=spc,
want_logsums=logsum_column_name is not None,
Expand All @@ -886,10 +941,35 @@ def iterate_location_choice(
trace_label=tracing.extend_trace_label(trace_label, "i%s" % iteration),
)

# choices_df is a pandas DataFrame with columns 'choice' and (optionally) 'logsum'
if choices_df is None:
# choices_df is a pandas DataFrame with columns "choice" and (optionally) "logsum"
if choices_df_ is None:
break

if spc.use_shadow_pricing:
# handle simulation method
if (
spc.shadow_settings["SHADOW_PRICE_METHOD"] == "simulation"
and iteration > 1
):
# if a process ends up with no sampled workers in it, hence an empty choices_df_, then choices_df will be what it was previously
if len(choices_df_) == 0:
choices_df = choices_df
else:
choices_df = pd.concat([choices_df, choices_df_], axis=0)
choices_df_index = choices_df_.index.name
choices_df = choices_df.reset_index()
# update choices of workers/students
choices_df = choices_df.drop_duplicates(
subset=[choices_df_index], keep="last"
)
choices_df = choices_df.set_index(choices_df_index)
choices_df = choices_df.sort_index()
else:
choices_df = choices_df_.copy()

else:
choices_df = choices_df_

spc.set_choices(
choices=choices_df["choice"],
segment_ids=persons_merged_df[chooser_segment_column].reindex(
Expand Down
4 changes: 1 addition & 3 deletions activitysim/abm/models/trip_purpose.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,9 +71,7 @@ def choose_intermediate_trip_purpose(

# probs should sum to 1 across rows
sum_probs = probs_spec[purpose_cols].sum(axis=1)
probs_spec.loc[:, purpose_cols] = probs_spec.loc[:, purpose_cols].div(
sum_probs, axis=0
)
probs_spec[purpose_cols] = probs_spec[purpose_cols].div(sum_probs, axis=0)

# left join trips to probs (there may be multiple rows per trip for multiple depart ranges)
choosers = pd.merge(
Expand Down
Loading