diff --git a/notebooks/MC_RAPTOR.ipynb b/notebooks/MC_RAPTOR.ipynb index 8eb5fd5..0d04121 100644 --- a/notebooks/MC_RAPTOR.ipynb +++ b/notebooks/MC_RAPTOR.ipynb @@ -1,1492 +1,984 @@ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# McRAPTOR for latest departure problem \n", "\n", "Multiple-criteria adaptation of RAPTOR (Round bAsed Public Transit Optimized Router) algorithm (cf. [README](../README.md)).\n", "\n", "## Left out at this stage:\n", "\n", "- Realistic time to get out of one transport and walk to the platform of the next. Instead, we just set it to 2 minutes, no matter what.\n", "We also reserve 2 minutes for walking from the entrance of a station to the correct platform, and vice versa, when walking between stations (total: `time_to_walk_distance + 4 minutes`)\n", "\n", "# Run the algorithm\n", "\n", "Please make sure that you follow these steps:\n", "- Execute `git lfs pull` in a terminal (if you haven't already).\n", "- Execute the code cells in the other sections of this notebook (to load the data and define the classes and functions that make up the algorithm).\n", "- In the cell below, follow instructions 1) - 3)\n", "- Run the cells in this section of the notebook." ] }, { "cell_type": "code", - "execution_count": 27, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Searching for journeys from Kloten, Lindenstrasse (stop 8580434) to Spital Zollikerberg (stop 8503077)\n", - "\n", - " STARTING round k = 1\n", - "Marked stops at the start of the round: [8503077, 8503078, 8590879, 8591024, 8591023, 8591028, 8591903]\n", - "\n", - " STARTING round k = 2\n", - "Number of marked stops at the start of the round: 72\n", - "Found 1 non-dominated journeys with 2 trips.\n", - "Will search for journeys with up to 3 trips.\n", - "\n", - " STARTING round k = 3\n", - "Number of marked stops at the start of the round: 543\n", - "Found 1 non-dominated journeys with 3 trips.\n", - "\n", - "*************** THE END ***************\n", - "There is a solution with 2 trips.\n", - "We shall not search for journeys with 4 or more trips.\n" - ] - } - ], + "outputs": [], "source": [ "# Make sure to execute the code cells of the other sections first!\n", "\n", "# 1) Define stop_id_start and stop_id_destination (as strings starting with 7 digits),\n", "# by replacinig the right hand side below.\n", "# - acceptable formats (examples for stop 1234567):\n", "# '1234567',\n", "# '1234567:0:6EF',\n", "# '1234567890314'\n", "stop_id_start = gen_random_stop_id() # replace with stop_id for start stop\n", "stop_id_destination = gen_random_stop_id() # replace with stop_id for target stop\n", "\n", "# 2) Define arrival_time_max (as a string)\n", "# - only acceptable format (two examples):\n", "# '09:42' for 9:42 AM\n", "# '15:06' for 3:06 PM\n", "arrival_time_max = '17:30' # replace with desired arrival time\n", "\n", "# 3) Define Pr_min: the minimum acceptable overall probability of success\n", "# - format: float in open interval (0, 1)\n", "Pr_min = 0.9\n", "\n", - "stop_id_start = str(stop_ids[0])\n", - "stop_id_destination = str(stop_ids[stops.shape[0]-1])\n", - "\n", "incoherences = {}\n", "bags_p_s, bags = run_mc_raptor(stop_id_start,\n", " stop_id_destination,\n", " arrival_time_max,\n", " Pr_min\n", " , incoherences\n", " )" ] }, { "cell_type": "code", - "execution_count": 28, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{}" - ] - }, - 
"execution_count": 28, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "incoherences" ] }, { "cell_type": "code", - "execution_count": 29, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Showing Pareto set of non-dominated journeys\n", - "from Kloten, Lindenstrasse (stop 8580434) to Spital Zollikerberg (stop 8503077)\n", - "with criteria:\n", - " - maximize departure time\n", - " - maximize probability of success\n", - " - minimize number of individual trips\n", - "and constraints:\n", - " - arrival time at least 2 minutes minutes before 17:30 (2 minutes to walk from the platform to the extraction point).\n", - " - probability of success at least 0.9000.\n", - "\n", - "Journeys are sorted by descending departure time.\n", - "\n", - "\n", - " ---------- OPTION 1 ----------\n", - "Departure stop: Kloten, Lindenstrasse (stop 8580434)\n", - "Departure time: 16:13\n", - "Number of trips used: 2\n", - "Probability of success: 1.0000\n", - " Walk 9.4 minutes from Kloten, Lindenstrasse (stop 8580434)\n", - " to Kloten (stop 8503308)\n", - " At Kloten (stop 8503308)\n", - " take the S-Bahn to Rapperswil\n", - " leaving at 16:25 (route id: 26-7-A-j19-1).\n", - " Get out at Zürich Stadelhofen (stop 8503003)\n", - " at 16:44.\n", - " Walk 1.1 minutes from Zürich Stadelhofen (stop 8503003)\n", - " to Zürich Stadelhofen FB (stop 8503059)\n", - " At Zürich Stadelhofen FB (stop 8503059)\n", - " take the S-Bahn to Forch\n", - " leaving at 17:03 (route id: 26-18-j19-1).\n", - " Get out at Spital Zollikerberg (stop 8503077)\n", - " at 17:15.\n", - "Target stop: Spital Zollikerberg (stop 8503077)\n", - "Requested arrival time: 17:30\n", - "\n", - "\n", - " ---------- OPTION 2 ----------\n", - "Departure stop: Kloten, Lindenstrasse (stop 8580434)\n", - "Departure time: 15:43\n", - "Number of trips used: 3\n", - "Probability of success: 1.0000\n", - " Walk 9.4 minutes from Kloten, Lindenstrasse (stop 8580434)\n", - " to Kloten (stop 8503308)\n", - " At Kloten (stop 8503308)\n", - " take the S-Bahn to Rapperswil\n", - " leaving at 15:55 (route id: 26-7-A-j19-1).\n", - " Get out at Zürich HB (stop 8503000)\n", - " at 16:10.\n", - " At Zürich HB (stop 8503000)\n", - " take the S-Bahn to Rapperswil\n", - " leaving at 16:39 (route id: 26-15-j19-1).\n", - " Get out at Zürich Stadelhofen (stop 8503003)\n", - " at 16:41.\n", - " Walk 1.1 minutes from Zürich Stadelhofen (stop 8503003)\n", - " to Zürich Stadelhofen FB (stop 8503059)\n", - " At Zürich Stadelhofen FB (stop 8503059)\n", - " take the S-Bahn to Forch\n", - " leaving at 17:03 (route id: 26-18-j19-1).\n", - " Get out at Spital Zollikerberg (stop 8503077)\n", - " at 17:15.\n", - "Target stop: Spital Zollikerberg (stop 8503077)\n", - "Requested arrival time: 17:30\n" - ] - } - ], + "outputs": [], "source": [ "print_solutions(bags_p_s)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Load the data\n", "### General considerations\n", "We adhere to the data structures proposed by Delling et al. These structures aim to minimize read times in memory by making use of consecutive in-memory adresses. Thus, structures with varying dimensions (e.g dataframes, python lists) are excluded. We illustrate the difficulty with an example. \n", "\n", "Each route has a potentially unique number of stops. Therefore, we cannot store stops in a 2D array of routes by stops, as the number of stops is not the same for each route. 
We adress this problem by storing stops consecutively by route, and keeping track of the index of the first stop for each route.\n", "\n", "This general strategy is applied to all the required data structures, where possible.\n", "\n", "### routes\n", "The `routes` array will contain arrays `[n_trips, n_stops, pt_1st_stop, pt_1st_trip]` where all four values are `int`. To avoid overcomplicating things and try to mimic pointers in python, `pt_1st_stop` and `pt_1st_trip` contain integer indices." ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "lines_to_next_cell": 0 }, "outputs": [], "source": [ "import numpy as np\n", "import pickle\n", "\n", "def pkload(path):\n", " with open(path, 'rb') as f:\n", " obj = pickle.load(f)\n", " return obj" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "lines_to_next_cell": 0 }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(1461, 4)\n" - ] - }, - { - "data": { - "text/plain": [ - "array([[ 1, 26, 0, 0],\n", - " [ 1, 8, 26, 26],\n", - " [ 1, 17, 34, 34],\n", - " ...,\n", - " [ 1, 3, 15362, 260396],\n", - " [ 2, 16, 15365, 260399],\n", - " [ 1, 28, 15381, 260431]], dtype=uint32)" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "routes = pkload(\"../data/routes_array_cyril.pkl\").astype(np.uint32)\n", "print(routes.shape)\n", "routes" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### routeStops\n", "`routeStops` is an array that contains the ordered lists of stops for each route. `pt_1st_stop` in `routes` is required to get to the first stop of the route. is itself an array that contains the sequence of stops for route $r_i$." ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(15409,)\n", - "1406\n" - ] - }, - { - "data": { - "text/plain": [ - "array([1221, 816, 776, ..., 1349, 1037, 552], dtype=uint16)" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "routeStops = pkload(\"../data/route_stops_array_cyril.pkl\").astype(np.uint16)\n", "print(routeStops.shape)\n", "print(routeStops.max())\n", "routeStops" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### stopTimes\n", "\n", "The i-th entry in the `stopTimes` array is itself an array which contains the arrival and departure time at a particular stop for a particular trip. `stopTimes` is sorted by routes, and then by trips. We retrieve the index of the first (earliest) trip of the route with the pointer `pt_1st_trip` stored in `routes`. We may use the built-in `numpy` [date and time data structures](https://blog.finxter.com/how-to-work-with-dates-and-times-in-python/). In short, declaring dates and times is done like this: `np.datetime64('YYYY-MM-DDThh:mm')`. Entries with a `NaT` arrival or departure times correspond to beginning and end of trips respectively.\n", "\n", "Note that trips are indexed implicitely in stopTimes, but we decided to change a little bit from the paper and index them according to their parent route instead of giving them an absolute index. It makes things a bit easier when coding the algorithm." 
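+    "\n",
+    "For example, a minimal sketch (with `r`, `t` and `i` as illustrative indices, and `routes`/`stopTimes` as loaded in the neighbouring cells): the row of `stopTimes` for the `t`-th trip of route `r` at the `i`-th stop of that route is obtained by combining `pt_1st_trip` with the trip and stop offsets.\n",
+    "\n",
+    "```python\n",
+    "r, t, i = 0, 0, 0                    # illustrative indices\n",
+    "n_stops, pt_1st_trip = routes[r][1], routes[r][3]\n",
+    "idx = pt_1st_trip + t * n_stops + i  # row index into stopTimes\n",
+    "arrival_time, departure_time = stopTimes[idx]\n",
+    "```"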
] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(260459, 2)\n" - ] - }, - { - "data": { - "text/plain": [ - "array([[ 'NaT', '2020-05-24T07:00:00.000000000'],\n", - " ['2020-05-24T07:01:00.000000000', '2020-05-24T07:01:00.000000000'],\n", - " ['2020-05-24T07:02:00.000000000', '2020-05-24T07:02:00.000000000'],\n", - " ...,\n", - " ['2020-05-24T07:35:00.000000000', '2020-05-24T07:35:00.000000000'],\n", - " ['2020-05-24T07:36:00.000000000', '2020-05-24T07:36:00.000000000'],\n", - " ['2020-05-24T07:37:00.000000000', 'NaT']],\n", - " dtype='datetime64[ns]')" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "stopTimes = pkload(\"../data/stop_times_array_cyril.pkl\")\n", "print(stopTimes.shape)\n", "stopTimes" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "`NaT` is the `None` equivalent for `numpy datetime64`." ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ True False]\n" - ] - } - ], + "outputs": [], "source": [ "print(np.isnat(stopTimes[0]))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### stopRoutes\n", "\n", "`stopRoutes` contains the routes (as `int`s representing an index in `routes`) associated with each stop. We need the pointer in `stops` to index `stopRoutes` correctly." ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(15344,)\n" - ] - }, - { - "data": { - "text/plain": [ - "array([ 17, 116, 126, ..., 861, 982, 1087], dtype=uint32)" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "stopRoutes = pkload(\"../data/stop_routes_array_cyril.pkl\").flatten().astype(np.uint32)\n", "print(stopRoutes.shape)\n", "stopRoutes" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### transfers\n", "`transfers` is a 2D `np.ndarray` where each entry `[p_j, time]` represents (in seconds) the time it takes to walk from stop p_j to the implicitely given stop p_i.\n", "p_i is given implicitely by the indexing, in conjunction with `stops`. In other words:\n", "`transfers[stops[p_i][2]:stops[p_i][3]]` returns all the footpaths arriving at stop p_i.\n", "\n", "As we cannot store different data types in numpy arras, `time` will have to be converted to `np.timedelta64`, the format used to make differences between `np.datetime.64` variables. We will consider all `time` values as **positive values in seconds**." 
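+    "\n",
+    "A minimal sketch of that conversion (assuming `transfers` and `stops` are loaded as in the cells of this section; `p_i` is an illustrative stop index and `arrival_limit` is a placeholder time):\n",
+    "\n",
+    "```python\n",
+    "p_i = 0                                            # illustrative stop index\n",
+    "arrival_limit = np.datetime64('2020-05-24T17:30')  # placeholder arrival time\n",
+    "for p_j, walk_seconds in transfers[stops[p_i][2]:stops[p_i][3]]:\n",
+    "    walk_time = np.timedelta64(int(walk_seconds), 's')  # seconds -> timedelta64\n",
+    "    latest_departure_from_p_j = arrival_limit - walk_time\n",
+    "```"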
] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(6264, 2)\n" - ] - }, - { - "data": { - "text/plain": [ - "array([[ 815, 267],\n", - " [1350, 569],\n", - " [ 63, 470],\n", - " ...,\n", - " [1113, 382],\n", - " [1122, 338],\n", - " [1270, 553]], dtype=uint16)" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "transfers = pkload(\"../data/transfer_array_cyril.pkl\").astype(np.uint16)\n", "print(transfers.shape)\n", "transfers" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### stops\n", "\n", "`stops` stores the indices in `stopRoutes` and in `transfers` corresponding to each stop.\n", "\n", "`stopRoutes[stops[p][0]:stops[p][1]]` returns the routes serving stop p.\n", "\n", "`transfers[stops[p][2]:stops[p][3]]` returns the footpaths arriving at stop p." ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ 0 75]\n", - "[0 0]\n", - "(1407, 2)\n", - "[ 0 0 0 75]\n" - ] - }, - { - "data": { - "text/plain": [ - "array([[ 0, 11, 0, 2],\n", - " [ 11, 20, 2, 7],\n", - " [ 20, 38, 7, 22],\n", - " ...,\n", - " [15303, 15334, 6242, 6250],\n", - " [15334, 15339, 6250, 6257],\n", - " [15339, 15344, 6257, 6264]], dtype=uint32)" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "stops = pkload(\"../data/stops_array_cyril.pkl\")\n", "stopRoutes = pkload(\"../data/stop_routes_array_cyril.pkl\")\n", "print(np.isnan(stops.astype(np.float64)).sum(axis=0))\n", "print(np.equal(stops, None).sum(axis=0))\n", "print(stops.shape)\n", "stops = stops[:,[0,0,1,1]]\n", "# Make column 1 contain the start_index of the next stop in stopRoutes\n", "stops[:-1,1] = stops[1:,0]\n", "stops[-1, 1] = stopRoutes.shape[0]\n", "# Deal with NaN's in column 2 (for stops with 0 foot transfers within 500m)\n", "if np.isnan(stops[-1,2]).item():\n", " stops[-1,2] = transfers.shape[0]\n", "for i in np.isnan(stops[:-1,2].astype(np.float64)).nonzero()[0][::-1]:\n", " stops[i,2] = stops[i+1,2]\n", "print(np.isnan(stops.astype(np.float64)).sum(axis=0))\n", "# Make column 3 contain the start_index of the next stop in stopRoutes\n", "stops[:-1,3] = stops[1:,2]\n", "stops[-1, 3] = transfers.shape[0]\n", "# Convert to int\n", "stops = stops.astype(np.uint32)\n", "stops" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Example" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "routes serving stop 0: [ 17 116 126 144 169 250 267 356 573 617 1054]\n", - "stops of route 17: [ 168 1365 504 434 715 454 1236 186 959 81 130 774 284 958\n", - " 815 0 1350 265 780 305 490 16 180 1397]\n", - "stops of route 116: [1397 180 16 490 305 780 265 1350 0 815 958 284 774 130\n", - " 81 959 186 1236 454]\n", - "stops of route 126: [ 0 1350 265 780 490 16 180 1397]\n", - "stops of route 144: [1397 180 16 490 780 265 1350 0 815 958 284 774 130 81\n", - " 959 186 1236 454 715 434 504 1365 168]\n", - "stops of route 169: [ 0 1350 265 780 305 490 16 180 1397]\n", - "stops of route 250: [ 0 815 958 284 774 130 81 959 186 1236 454 715 434 504\n", - " 1365 168]\n", - "stops of route 267: [1397 180 16 490 780 265 1350 0 815 958 284 
774 130 81\n", - " 959 186 1236 454]\n", - "stops of route 356: [1397 180 16 490 305 780 265 1350 0 815 958 284 774 130\n", - " 81 959 186 1236 454 715 434 504 1365 168]\n", - "stops of route 573: [ 454 1236 186 959 81 130 774 284 958 815 0 1350 265 780\n", - " 490 16 180 1397]\n", - "stops of route 617: [ 454 1236 186 959 81 130 774 284 958 815 0 1350 265 780\n", - " 305 490 16 180 1397]\n", - "stops of route 1054: [ 168 1365 504 434 715 454 1236 186 959 81 130 774 284 958\n", - " 815 0 1350 265 780 490 16 180 1397]\n", - "stop 0 can be reached from stop 815 by walking for 267 seconds.\n", - "stop 0 can be reached from stop 1350 by walking for 569 seconds.\n" - ] - } - ], + "outputs": [], "source": [ "p = 0\n", "routes_serving_p = stopRoutes[stops[p][0]:stops[p][1]]\n", "print(\"routes serving stop 0:\", routes_serving_p)\n", "for r in routes_serving_p:\n", " print(\"stops of route {}:\".format(r), routeStops[routes[r][2]:routes[r][2]+routes[r][1]])\n", "for pPrime, walking_seconds in transfers[stops[p][2]:stops[p][3]]:\n", " print(\"stop {} can be reached from stop {} by walking for {} seconds.\".format(p, pPrime, walking_seconds))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Distribution of delays" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[0.03314917, 0.88950276, 0.97237569, 0.98895028, 0.98895028,\n", - " 0.98895028, 0.99447514, 0.99447514, 0.99447514, 1. ,\n", - " 1. , 1. , 1. , 1. , 1. ,\n", - " 1. , 1. , 1. , 1. , 1. ,\n", - " 1. , 1. , 1. , 1. , 1. ,\n", - " 1. , 1. , 1. , 1. , 1. ,\n", - " 1. , 1. ],\n", - " [0. , 0.85082873, 0.95027624, 0.98895028, 0.98895028,\n", - " 0.98895028, 0.98895028, 0.99447514, 0.99447514, 0.99447514,\n", - " 1. , 1. , 1. , 1. , 1. ,\n", - " 1. , 1. , 1. , 1. , 1. ,\n", - " 1. , 1. , 1. , 1. , 1. ,\n", - " 1. , 1. , 1. , 1. , 1. ,\n", - " 1. , 1. ]])" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import gzip \n", "with gzip.open(\"../data/join_distribution_cumulative_p_3.pkl.gz\") as distrib_pkl:\n", " distrib_delays = pickle.load(distrib_pkl)\n", " \n", "distrib_delays[0:2]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Relate `stop_id`s and `trip_headsign`s to the integer indices used in the algorithm" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
route_idstop_id_generalstop_nametrip_headsignroute_intstop_introute_desc
026-13-j19-18576240Zürich, MeierhofplatzZürich, Albisgütli01221Tram
126-13-j19-18591353Zürich, SchwertZürich, Albisgütli0816Tram
226-13-j19-18591039Zürich, Alte TrotteZürich, Albisgütli0776Tram
326-13-j19-18591121Zürich, EschergutwegZürich, Albisgütli0307Tram
426-13-j19-18591417Zürich, WaidfusswegZürich, Albisgütli0347Tram
\n", - "
" - ], - "text/plain": [ - " route_id stop_id_general stop_name trip_headsign \\\n", - "0 26-13-j19-1 8576240 Zürich, Meierhofplatz Zürich, Albisgütli \n", - "1 26-13-j19-1 8591353 Zürich, Schwert Zürich, Albisgütli \n", - "2 26-13-j19-1 8591039 Zürich, Alte Trotte Zürich, Albisgütli \n", - "3 26-13-j19-1 8591121 Zürich, Eschergutweg Zürich, Albisgütli \n", - "4 26-13-j19-1 8591417 Zürich, Waidfussweg Zürich, Albisgütli \n", - "\n", - " route_int stop_int route_desc \n", - "0 0 1221 Tram \n", - "1 0 816 Tram \n", - "2 0 776 Tram \n", - "3 0 307 Tram \n", - "4 0 347 Tram " - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "stop_times_df = pkload(\"../data/stop_times_df_cyril.pkl\")[['route_id', 'stop_id_general', 'stop_name', 'trip_headsign', 'route_int', 'stop_int', 'route_desc']]\n", "stop_times_df.head()" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[8502508 8503078 8503088 ... 8591173 8590772 8503509] ['Spreitenbach, Raiacker' 'Waldburg' 'Zürich HB SZU' ...\n", - " 'Zürich, Haldenbach' 'Rüschlikon, Belvoir' 'Schlieren']\n" - ] - } - ], + "outputs": [], "source": [ "stop_ids_names = stop_times_df[['stop_id_general', 'stop_int', 'stop_name']].drop_duplicates()\n", "assert np.all(stop_ids_names == stop_ids_names.drop_duplicates(subset='stop_int'))\n", "assert np.all(stop_ids_names == stop_ids_names.drop_duplicates(subset='stop_id_general'))\n", "assert np.all(stop_ids_names == stop_ids_names.drop_duplicates(subset='stop_name'))\n", "stop_ids_names = stop_ids_names.sort_values(by='stop_int')\n", "stop_ids = stop_ids_names['stop_id_general'].to_numpy()\n", "stop_names = stop_ids_names['stop_name'].to_numpy()\n", "print(stop_ids, stop_names)" ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "710 8591325 Zürich, Roswiesen\n" - ] - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
route_idstop_id_generalstop_nametrip_headsignroute_intstop_introute_desc
4711026-7-B-j19-18591325Zürich, RoswiesenStettbach, Bahnhof320710Tram
4714126-7-B-j19-18591325Zürich, RoswiesenStettbach, Bahnhof320710Tram
\n", - "
" - ], - "text/plain": [ - " route_id stop_id_general stop_name trip_headsign \\\n", - "47110 26-7-B-j19-1 8591325 Zürich, Roswiesen Stettbach, Bahnhof \n", - "47141 26-7-B-j19-1 8591325 Zürich, Roswiesen Stettbach, Bahnhof \n", - "\n", - " route_int stop_int route_desc \n", - "47110 320 710 Tram \n", - "47141 320 710 Tram " - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "p = np.random.randint(stops.shape[0])\n", "print(p, stop_ids[p], stop_names[p])\n", "stop_times_df[stop_times_df['stop_int'] == p].head(2)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Define the classes and functions making up the journey planner\n", "\n", "Based on a modified version of RAPTOR (reversed RAPTOR), we implement a multiple criteria RAPTOR algorithm.\n", "The optimization criteria are:\n", "- Latest departure\n", "- Highest probability of success of the entire trip\n", "- Lowest number of connections (implicit with the round-based approach)" ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "numpy.datetime64('2020-05-11T15:28')" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# absolute constants:\n", "\n", "tau_change_platform = np.timedelta64(2, 'm')\n", "np.datetime64('2020-05-11T15:30') - tau_change_platform" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# helper functions\n", "\n", "def calc_stopTimes_idx(r, t, offset_p):\n", " \"\"\"Returns the index of the entry in stopTimes\n", " corresponding to the offset_p-th stop of the t-th trip\n", " of route r.\n", " \"\"\"\n", " return (routes[r][3] # 1st trip of route\n", " + t * routes[r][1] # offset for the right trip\n", " + offset_p # offset for the right stop\n", " )\n", "\n", "def get_arrival_time(r, t, offset_p):\n", " \"\"\"Returns 2000 (instead of 0) if t is None.\n", " Otherwise, returns the arrival time of the t-th trip of route r\n", " at the offset_p-th stop of route r.\n", " trips and stops of route r start at t=0, offset_p=0.\n", " \"\"\"\n", " if t is None:\n", " return np.datetime64('2000-01-01T01:00')\n", " \n", " return stopTimes[calc_stopTimes_idx(r,t,offset_p)][0] # 0 for arrival time\n", "\n", "def get_departure_time(r, t, offset_p):\n", " \"\"\"Throws TypeError if t is None.\n", " Otherwise, returns the departure time of the t-th trip of route r\n", " at the offset_p-th stop of route r.\n", " trips and stops of route r start at t=0 & offset_p=0.\n", " \"\"\"\n", " if t is None:\n", " raise TypeError(\"Requested departure time of None trip!\")\n", " \n", " return stopTimes[calc_stopTimes_idx(r,t,offset_p)][1] # 1 for departure time\n", "\n", "def get_stops(r):\n", " \"\"\"Returns the stops of route r\"\"\"\n", " idx_first_stop = routes[r][2]\n", " return routeStops[idx_first_stop:idx_first_stop+routes[r][1]] # n_stops = routes[r][1]\n", "\n", "def time2str(t):\n", " \"\"\"Prints the hour and minutes of np.datetime64 t.\"\"\"\n", " return str(t.astype('datetime64[m]')).split('T')[1]\n", "\n", "def stop_id_to_int(p_id):\n", " \"\"\"Given a stop id, returns the corresponding stop_int\"\"\"\n", " return np.asarray(stop_ids == int(p_id[:7])).nonzero()[0].item()\n", "\n", "def gen_random_stop_id():\n", " \"\"\"Generate a random stop_id to test the journey planner.\"\"\"\n", " return 
str(stop_ids[np.random.randint(stops.shape[0])])+':0:5DE'" ] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class InstantiationException(Exception):\n", " pass\n", "\n", "class BaseLabel:\n", " \"\"\"An abstract base class for Labels. Do not instantiate.\n", " A label corresponds to a recursive (partial) solution, going\n", " to the target stop from the stop currently under consideration.\n", " \"\"\"\n", " indent = \" \"*4\n", " def __init__(self, stop, tau_dep, Pr, n_trips):\n", " self.stop = stop\n", " self.tau_dep = tau_dep\n", " self.Pr = Pr\n", " self.n_trips = n_trips\n", " \n", " def dominates(self, other):\n", " \"\"\"Returns True if self dominates other, else returns False.\n", " other: another Label instance.\n", " \"\"\"\n", " if self.tau_dep >= other.tau_dep \\\n", " and self.Pr >= other.Pr \\\n", " and self.n_trips <= other.n_trips :\n", " return True\n", " return False\n", " \n", " def stop2str(self):\n", " \"\"\"Returns a printable str representing self.stop\"\"\"\n", " return \"{stopN} (stop {stopI})\".format(\n", " stopN = stop_names[self.stop],\n", " stopI = stop_ids[self.stop]\n", " )\n", " \n", " def print_journey(self):\n", " print(\"Departure stop: \", self.stop2str())\n", " print(\"Departure time: \", time2str(self.tau_dep))\n", " print(\"Number of trips used: \", self.n_trips)\n", " print(\"Probability of success: {:.4f}\".format(self.Pr))\n", " \n", " self.print_instructions()\n", " \n", " def to_str(self):\n", " s = \"Departure at {0} from stop {1} (id: {2}, int: {3}).\".format(\n", " self.tau_dep,\n", " stop_names[self.stop],\n", " stop_ids[self.stop],\n", " self.stop\n", " )\n", " return repr(type(self)) + s\n", " \n", " def pprint(self, indent=''):\n", " print(indent, self.to_str())\n", " \n", " def copy(self):\n", " raise InstantiationException(\"class BaseLabel should never \"\n", " \"be instantiated.\"\n", " )\n", "\n", "class ImmutableLabel(BaseLabel):\n", " \"\"\"Base class for immutable Labels\"\"\"\n", " def copy(self):\n", " return self\n", "\n", "class TargetLabel(ImmutableLabel):\n", " \"\"\"A special type of label reserved for the target stop.\"\"\"\n", " def __init__(self, stop, tau_dep):\n", " BaseLabel.__init__(self, stop, tau_dep, 1., 0)\n", " \n", " def print_instructions(self):\n", " \"\"\"Finish printing instructions for the journey.\"\"\"\n", " print(\"Target stop: \", self.stop2str())\n", " print(\"Requested arrival time:\", time2str(self.tau_dep))\n", "\n", "class WalkLabel(ImmutableLabel):\n", " \"\"\"A special type of label for walking connections.\"\"\"\n", " def __init__(self, stop, tau_walk, next_label):\n", " \"\"\"Create a new WalkLabel instance.\n", " stop: stop where you start walking.\n", " tau_walk: (np.timedelta64) duration of the walk.\n", " next_label: label describing the rest of the trip after walking.\n", " \"\"\"\n", " if isinstance(next_label, WalkLabel):\n", " raise ValueError(\"Cannot chain two consecutive WalkLabels!\")\n", " tau_dep = next_label.tau_dep - tau_walk - tau_change_platform\n", " BaseLabel.__init__(self, stop, tau_dep, next_label.Pr, next_label.n_trips)\n", " self.tau_walk = tau_walk\n", " self.next_label = next_label\n", " \n", " def print_instructions(self):\n", " \"\"\"Recursively print instructions for the whole journey.\"\"\"\n", " print(self.indent + \"Walk {:.1f} minutes from\".format(\n", " self.tau_walk / np.timedelta64(1,'m')\n", " ),\n", " self.stop2str()\n", " )\n", " print(self.indent*2 + \"to\", 
self.next_label.stop2str())\n", " self.next_label.print_instructions()\n", "\n", "class RouteLabel(BaseLabel):\n", " \"\"\"A type of label for regular transports.\"\"\"\n", " def __init__(self,\n", " tau_dep,\n", " r,\n", " t,\n", " offset_p,\n", " next_label,\n", " Pr_connection_success\n", " ):\n", " \n", " self.tau_dep = tau_dep\n", " self.r = r\n", " self.t = t\n", " self.offset_p_in = offset_p\n", " self.offset_p_out = offset_p\n", " self.next_label = next_label\n", " # Store Pr_connection_success for self.copy()\n", " self.Pr_connection_success = Pr_connection_success\n", " \n", " self.route_stops = get_stops(self.r)\n", " self.stop = self.route_stops[self.offset_p_in]\n", " self.Pr = self.Pr_connection_success * self.next_label.Pr\n", " self.n_trips = self.next_label.n_trips + 1\n", " \n", " def update_stop(self, stop):\n", " self.stop = stop\n", " self.offset_p_in = self.offset_p_in - 1\n", " # Sanity check:\n", " assert self.offset_p_in >= 0\n", " assert self.route_stops[self.offset_p_in] == stop\n", " self.tau_dep = get_departure_time(self.r, self.t, self.offset_p_in)\n", " \n", " def print_instructions(self):\n", " \"\"\"Recursively print instructions for the whole journey.\"\"\"\n", " stopTimes_idx = calc_stopTimes_idx(self.r, self.t,\n", " self.offset_p_in)\n", " \n", " print(self.indent + \"At\", self.stop2str())\n", " print(self.indent*2 + \"take the\",\n", " stop_times_df['route_desc'][stopTimes_idx], \"to\",\n", " stop_times_df['trip_headsign'][stopTimes_idx]\n", " )\n", " print(self.indent*2 + \"leaving at\", time2str(self.tau_dep),\n", " \"(route id: {}).\".format(\n", " stop_times_df['route_id'][stopTimes_idx]\n", " )\n", " )\n", " \n", " tau_arr = get_arrival_time(\n", " self.r,\n", " self.t,\n", " self.offset_p_out\n", " )\n", " assert self.next_label.stop == self.route_stops[self.offset_p_out]\n", " \n", " print(self.indent + \"Get out at\", self.next_label.stop2str())\n", " print(self.indent*2 + \"at\", time2str(tau_arr)+\".\")\n", "\n", " self.next_label.print_instructions()\n", " \n", " def copy(self):\n", " \"\"\"When RouteLabels are merged into the bag of a stop,\n", " they must be copied (because they will subsequently\n", " be changed with self.update_stop()).\n", " \"\"\"\n", " l = RouteLabel(self.tau_dep,\n", " self.r,\n", " self.t,\n", " self.offset_p_in,\n", " self.next_label,\n", " self.Pr_connection_success\n", " )\n", " l.offset_p_out = self.offset_p_out\n", " return l" ] }, { "cell_type": "code", - "execution_count": 26, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def run_mc_raptor(stop_id_start,\n", " stop_id_destination,\n", " arrival_time_max,\n", " Pr_min\n", " , incoherences\n", " ):\n", " \"\"\"Run MC RAPTOR, using the data defined in cells above (stopRoutes etc.).\n", " Inputs:\n", " p_s: source stop\n", " p_t: target stop\n", " tau_0: latest acceptable arrival time. str (format: '14:56')\n", " Pr_min: minimum acceptable probability of success\n", " Output:\n", " bags_p_s: bags_p_s[k] contains the pareto set of non-dominated journeys\n", " from p_s to p_t that use at most k different trips (i.e,\n", " getting in at most k different vehicles), under the given\n", " constraints:\n", " 1. Each journey must succeed with a probability\n", " greater or equal to Pr_min.\n", " 2. The journey is a success if and only if all individual\n", " connections succeed, including the user's appointment\n", " in p_t at tau_0.\n", " 3. 
A connection succeeds if, and only if, the user reaches\n", " the platform before or on the scheduled departure time\n", " (allowing some time to change platforms)\n", " Non-dominated:\n", " A journey J1 is *dominated* by another journey J2, if\n", " J2 departs no earlier than J1 AND the probability of\n", " success of J2 is no less than that of J1.\n", " Pareto set:\n", " Each bag in bags_p_s contains only journeys that are not\n", " dominated by any other possible journey. Such a collection\n", " of non-dominated solutions is called a *Pareto set*.\n", " \n", " Each journey is represented as a Label that forms the start of a chain.\n", " The journey can be reconstructed by calling label.print_journey().\n", " \"\"\"\n", "# Input sanitization:\n", " try:\n", " tau_0 = np.datetime64('2020-05-24T'+arrival_time_max)\n", " p_s = stop_id_to_int(stop_id_start)\n", " p_t = stop_id_to_int(stop_id_destination)\n", " except Exception as e:\n", " print(\"ERROR parsing input! Please make sure that:\")\n", " print(\"- stop_id_start and stop_id_destination are strings \"\n", " \"beginning with 7 or more digits.\")\n", " print(\"- arrival_time_max is in the format 'hh:mm'. Example: '09:42'.\")\n", " raise e\n", " \n", "# initialization\n", " k_max = 10 # Maximum number of rounds\n", " \n", " # For each route and for each label at each stop p, we will look at the n latest\n", " # trips until we find a trip for which the individual connection at stop p\n", " # succeeds with a probability at least equal to Pr_threshold.\n", " # Under some reasonable assumptions, setting Pr_threshold = Pr_min**(1/k)\n", " # guarantees that we will find a solution, if a solution exists involving at\n", " # most k connections (including the user's appointment in p_t at tau_0).\n", " Pr_threshold = Pr_min**(0.1)\n", " \n", " # Initialize empty bags for each stop for round 0:\n", " n_stops = stops.shape[0]\n", " bags = [\n", " [\n", " [] # an empty bag\n", " for _ in range(n_stops)] # one empty bag per stop\n", " ]\n", "\n", " # Create a TargetLabel for p_t, and mark p_t\n", " bags[0][p_t].append(TargetLabel(p_t, tau_0))\n", " marked = {p_t}\n", "\n", " print(\"Searching for journeys from\",\n", " stop_names[p_s],\n", " \"(stop {})\".format(stop_ids[p_s]),\n", " \"to\", bags[0][p_t][0].stop2str())\n", "\n", "# Define bag operations (they depend on p_s and Pr_min for target pruning):\n", - " def update_bag(bag, label, k):\n", + " def update_bag(bag, label, k, target_pruning=True):\n", " \"\"\"Add label to bag and remove dominated labels.\n", " bag is altered in-place.\n", "\n", " k: Round number, used for target pruning.\n", "\n", " returns: Boolean indicating whether bag was altered.\n", " \"\"\"\n", - " # Apply the Pr_min constraint to label:\n", - " if label.Pr < Pr_min:\n", - " return False\n", - "\n", - " # Prune label if it is dominated by bags[k][p_s]:\n", - " for L_star in bags[k][p_s]:\n", - " if L_star.dominates(label):\n", + " if target_pruning:\n", + " # Apply the Pr_min constraint to label:\n", + " if label.Pr < Pr_min:\n", " return False\n", "\n", + " # Prune label if it is dominated by bags[k][p_s]:\n", + " for L_star in bags[k][p_s]:\n", + " if L_star.dominates(label):\n", + " return False\n", + "\n", " # Otherwise, merge label into bag1\n", " changed = False\n", " for L_old in bag:\n", " if L_old.dominates(label):\n", " return changed\n", " if label.dominates(L_old):\n", " bag.remove(L_old)\n", " changed = True\n", " bag.append(label.copy())\n", " return True\n", "\n", - " def merge_bags(bag1, bag2, k):\n", + " 
def merge_bags(bag1, bag2, k, target_pruning=True):\n", " \"\"\"Merge bag2 into bag1 in-place.\n", " k: Round number, used for target pruning.\n", " returns: Boolean indicating whether bag was altered.\n", " \"\"\"\n", " changed = False\n", " for label in bag2:\n", - " changed = changed or update_bag(bag1, label, k)\n", + " changed = update_bag(bag1, label, k, target_pruning) or changed\n", " return changed\n", " \n", " globals().update({'merge_bags': merge_bags})\n", " \n", "# Define the footpaths-checking function (depends on update_bag)\n", " def check_footpaths(bags, marked, k):\n", " \"\"\"Modify bags and marked in-place to account for foot-paths.\"\"\"\n", " q = []\n", " for p in marked:\n", " for pPrime, delta_seconds in transfers[stops[p][2]:stops[p][3]]:\n", " q.append((p, pPrime, delta_seconds))\n", " for p, pPrime, delta_seconds in q:\n", " for L_k in bags[k][p]:\n", " # We do not allow two consecutive walking trips\n", " if not isinstance(L_k, WalkLabel):\n", " L_new = WalkLabel(pPrime, np.timedelta64(delta_seconds, 's'), L_k)\n", " if update_bag(bags[k][pPrime], L_new, k):\n", " marked.add(pPrime)\n", "\n", "# main loop\n", " indent= ' '*4\n", "\n", " k = 0\n", " # Check footpaths leading to p_t at k=0:\n", " check_footpaths(bags, marked, k)\n", " while k < k_max:\n", " k += 1 # k=1 at fist round, as it should.\n", "\n", " # Instead of using best bags, carry over the bags from last round.\n", " # if len(bags <= k):\n", "\n", " bags.append([bags[-1][p].copy() for p in range(n_stops)])\n", "\n", " print('\\n', ' '*30, 'STARTING round k =', k)\n", " if len(marked) < 50:\n", " print('Marked stops at the start of the round:', [stop_ids[p] for p in marked])\n", " else:\n", " print('Number of marked stops at the start of the round:', len(marked))\n", " \n", " # accumulate routes serving marked stops from previous rounds\n", " q = []\n", " for p in marked:\n", " for r in stopRoutes[stops[p][0]:stops[p][1]]: # foreach route r serving p\n", " append_r_p = True\n", " for idx, (rPrime, pPrime) in enumerate(q): # is there already a stop from the same route in q ?\n", " if rPrime == r:\n", " append_r_p = False\n", " p_pos_in_r = np.where(get_stops(r) == p)[0][-1]\n", " pPrime_pos_in_r = np.where(get_stops(r) == pPrime)[0][-1]\n", " if p_pos_in_r > pPrime_pos_in_r:\n", " q[idx] = (r, p) # substituting (rPrime, pPrime) by (r, p)\n", " if append_r_p:\n", " q.append((r, p))\n", " marked.clear() # unmarking all stops\n", "# print(\"Queue:\", q)\n", "\n", "# print('Queue before traversing each route: {}'.format(q))\n", " # traverse each route\n", " for (r, p) in q:\n", "# print('\\n****TRAVERSING ROUTE r={0} from stop p={1}****'.format(r, p))\n", " B_route = [] # new empty route bag\n", "\n", " # we traverse the route backwards (starting at p, not from the end of the route)\n", " stops_of_current_route = get_stops(r)\n", "# print('Stops of current route:', stops_of_current_route)\n", " offset_p = np.asarray(stops_of_current_route == p).nonzero()[0]\n", " if offset_p.size < 1:\n", " if not p in incoherences:\n", " incoherences[p] = set()\n", " incoherences[p].add(r)\n", "# print(\"WARNING: route {r} is said to serve stop {p} in stopRoutes, but stop {p} \"\n", "# \"is not included as a stop of route {r} in routeStops...\".format(p=p, r=r))\n", " offset_p = -1\n", " else:\n", " offset_p = offset_p[-1]\n", " for offset_p_i in range(offset_p, -1, -1):\n", " p_i = stops_of_current_route[offset_p_i]\n", "# print('\\n\\n'+indent+\"p_i: {}\".format(p_i))\n", "\n", " # Update the labels of the route 
bag:\n", " for L in B_route:\n", " L.update_stop(p_i)\n", "\n", " # Merge B_route into bags[k][p_i]\n", " if merge_bags(bags[k][p_i], B_route, k):\n", "# print(\"marking stop\", p_i)\n", " marked.add(p_i)\n", "\n", " # Can we step out of a later trip at p_i ?\n", " # This is only possible if we already know a way to get from p_i to p_t in < k vehicles\n", " # (i.e., if there is at least one label in bags[k-1][p_i])\n", " for L_k in bags[k-1][p_i]:\n", " # Note that k starts at 1 and bags[0][p_t] contains a TargetLabel.\n", "# print('\\n'+indent+'----scanning arrival times for route r={0} at stop p_i={1}----'.format(r, p_i))\n", "\n", " # We check the trips from latest to earliest\n", " for t in range(routes[r][0]-1, -1, -1): # n_trips = routes[r][0]\n", " # Does t_r arrive early enough for us to make the rest \n", " # of the journey from here (tau[k-1][p_i])?\n", " tau_arr = get_arrival_time(r, t, offset_p_i)\n", "# print(indent+'arrival time: ', tau_arr)\n", " if tau_arr <= L_k.tau_dep - tau_change_platform:\n", "\n", " max_delay = L_k.tau_dep - tau_arr - tau_change_platform\n", " max_delay_int = min(max_delay.astype('timedelta64[m]').astype('int'), 30)\n", " \n", " Pr_connection = distrib_delays[calc_stopTimes_idx(r, t, offset_p_i),\n", " max_delay_int + 1]\n", "# print(Pr_connection)\n", " L_new = RouteLabel(get_departure_time(r, t, offset_p_i),\n", " r,\n", " t,\n", " offset_p_i,\n", " L_k,\n", " Pr_connection\n", " )\n", " update_bag(B_route, L_new, k)#:\n", "# print(indent+\"Explored connection from\")\n", "# L_new.pprint(indent*2)\n", "# print(indent+\"to\")\n", "# L_k.pprint(indent*2)\n", "\n", " # We don't want to add a label for every trip that's earlier than tau_dep.\n", " # Instead, we stop once we've found a trip that's safe enough.\n", " if Pr_connection > Pr_threshold:\n", " break\n", " \n", "# print(marked)\n", " # Look at foot-paths (bags and marked are altered in-place):\n", " check_footpaths(bags, marked, k)\n", "# print(marked)\n", " \n", " # Report the number of new journeys found this round:\n", " n_new_journeys = len(bags[k][p_s]) - len(bags[k-1][p_s])\n", " if n_new_journeys:\n", " print(\"Found\", n_new_journeys, \"non-dominated journeys with\", k, \"trips.\")\n", " # If there's a journey with X trips, we won't look for a solution with X+3 trips:\n", " if not bags[k-1][p_s]:\n", " # The condition above means we found a journey for the first time.\n", " print(\"Will search for journeys with up to\", k+1, \"trips.\")\n", " k_max = k+1\n", "\n", " # Additional stopping criterion: reached equilibrium\n", " if not marked:\n", " print(\"\\n\", ' '*15, \"*\"*15, \" THE END \", \"*\"*15)\n", " print(\"Equilibrium reached with\" + \"out\"*(not bags[k][p_s]),\n", " \"finding a solution. The end.\")\n", " return [bags[K][p_s] for K in range(len(bags))], bags\n", " \n", " # We have exited the while-loop because k==kmax:\n", " print(\"\\n\" + \"*\"*15 + \" THE END \" + \"*\"*15)\n", " if bags[k][p_s]:\n", " print(\"There is a solution with\", k-1, \"trips.\")\n", " else:\n", " print(\"There are no solutions with up to\", k, \"trips.\")\n", " print(\"We shall not search for journeys with\", k+1, \"or more trips.\")\n", "\n", " return [bags[K][p_s] for K in range(len(bags))], bags\n", "\n", "def time_sorted_pareto(bags_p):\n", " \"\"\"Input: list of bags, e.g. 
for one stop and various k.\n", " It is assumed that Pr >= Pr_min for each label.\n", " Output: A Pareto set of non-dominated labels (np.array),\n", " sorted by decreasing departure time.\n", " \"\"\"\n", " res_bag = []\n", " for bag in bags_p:\n", - " globals()['merge_bags'](res_bag, bag, 0)\n", + " globals()['merge_bags'](res_bag, bag, 0, target_pruning=False)\n", " res = np.array(res_bag)\n", " return res[np.argsort([label.tau_dep for label in res])[::-1]]\n", "\n", "def print_solutions(bags_p):\n", " print(\"Showing Pareto set of non-dominated journeys\")\n", " if not bags_p[-1]:\n", " print(\"There are no journeys to print.\")\n", " return\n", " L_s = bags_p[-1][0]\n", " L_t = L_s\n", " while not isinstance(L_t, TargetLabel):\n", " L_t = L_t.next_label\n", " print(\"from\", L_s.stop2str(), \"to\", L_t.stop2str())\n", " print(\"with criteria:\")\n", " print(\" - maximize departure time\")\n", " print(\" - maximize probability of success\")\n", " print(\" - minimize number of individual trips\")\n", " print(\"and constraints:\")\n", " print(\" - arrival time at least\", tau_change_platform,\n", " \"minutes before\", time2str(L_t.tau_dep), \"(2 minutes to walk from the platform to the extraction point).\")\n", " print(\" - probability of success at least {Pr_min:.4f}.\".format(**globals()))\n", " print(\"\\nJourneys are sorted by descending departure time.\")\n", " \n", " for i, label in enumerate(time_sorted_pareto(bags_p)):\n", " print('\\n'*2, '-'*10, 'OPTION', i+1, '-'*10)\n", " label.print_journey()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ " " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ " " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ " " ] } ], "metadata": { "jupytext": { "formats": "ipynb,md,py:percent" }, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.6" } }, "nbformat": 4, "nbformat_minor": 4 }