simple_intersection_env.py 52.6 KB
Newer Older
Aravind Bk's avatar
Aravind Bk committed
1 2 3 4 5 6
import pyglet
import gym
from copy import deepcopy

from env import RoadEnv, EpisodicEnvBase

Ashish Gaurav's avatar
Ashish Gaurav committed
7
from .vehicles import Vehicle
Aravind Bk's avatar
Aravind Bk committed
8 9
from .utilities import calculate_s, calculate_v_max
from .utilities import BoundingBox as bb_utils
Ashish Gaurav's avatar
Ashish Gaurav committed
10
from .features import Features
Aravind Bk's avatar
Aravind Bk committed
11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
from .constants import *

from model_checker.simple_intersection import LTLProperty

# import the classes for graphical output.
from .shapes import Text, Image
from .road_networks import RoadNetworkCross
from .vehicle_networks import VehicleNetworkCross
from .utilities import get_APs, get_veh_attributes, config_Pyglet


class SimpleIntersectionEnv(RoadEnv, EpisodicEnvBase):
    """Road environment corresponding to the cross scenario.

    This scenario has just one horizontal lane and one vertical lane.
    Relevant constants are used from the constants file.
    """

    #: The vehicle-to-vehicle desired distance in the aggressive driving policy.
    V2V_ref = 6.0

    #: The maximum distance within which self.vehs[i].APs['veh_ahead'] = True.
    veh_ahead_max = 40.0

    #: The minimum distance that every two adjacent vehicles have to be apart
    # from each other (one full vehicle length).
    veh_ahead_min = 2 * VEHICLE_SIZE_HALF[0]

    #: The penalty (i.e., the negative reward) given at the end of the maneuver
    # when the ego has a collision. A penalty is a negative reward so it will be
    # summed up to the reward with subtraction.
    _penalty_in_ego_collision = 200

    #: The penalty for an LTL constraint violation.
    _penalty_in_violation = 200

    #: The penalty for the ego's heading angle going out of range.
    _penalty_in_ego_theta_out_of_range = 200

    #: The maximum of the ego's heading angle.
    max_ego_theta = np.pi / 3

    #: The weight vector to calculate the cost. In the maneuver, cost_weights
    # can be set to a specific value which may be different than the default.
    cost_weights = (1.0, 0.25, 0.1, 1.0, 100.0, 0.1, 0.25, 0.1)

    # TODO: Move this to constants.
    # The empirical min and max of each term in the cost vector, which is used
    # to normalize the values.
    # NOTE(review): the third range has identical lower and upper bounds (both
    # negative); it looks like the upper bound was meant to be
    # +12*np.tan(MAX_STEERING_ANGLE) / VEHICLE_WHEEL_BASE -- confirm.
    cost_normalization_ranges = \
        [[-3.5, 3.5], [-6.2, 6.2],
         [-12*np.tan(MAX_STEERING_ANGLE) / VEHICLE_WHEEL_BASE, -12*np.tan(MAX_STEERING_ANGLE) / VEHICLE_WHEEL_BASE],
         [-12, 12], [0, 12], [-2, 2], [-16, 16], [-2, 2]]

    # Flag to normalize cost (see the cost method). False by default.
    normalize_cost = False

    #: The pyglet window used for rendering (created lazily elsewhere).
    window = None

    def _init_LTL_preconditions(self):
        """Append the scenario's LTL preconditions to self._LTL_preconditions.

        Each precondition is an LTLProperty whose violation incurs
        self._penalty_in_violation.
        """
        self._LTL_preconditions.append(
            LTLProperty("G( in_intersection => intersection_is_clear)",
                        self._penalty_in_violation))
        self._LTL_preconditions.append(
            LTLProperty("G( not in_intersection U highest_priority)",
                        self._penalty_in_violation))
        self._LTL_preconditions.append(
            LTLProperty(
                "G( in_stop_region => (in_stop_region U has_stopped_in_stop_region) )",
                self._penalty_in_violation))

    def __init__(self, **kwargs):
        """SimpleIntersectionEnv constructor.

        Args:
            **kwargs: scenario-generation options, forwarded verbatim to
                generate_scenario (e.g., n_others_range, ego_pos_range,
                veh_ahead_scenario); see generate_scenario for the full
                list of accepted keys and their defaults.
        """

        super().__init__()

        # load the background texture
        self.background = Image(
            'background.png', anchor=(0, 0), image_type="bg")

        # load the road network
        self.road_network = RoadNetworkCross('road_texture.png')

        #: the reward for achieving goal is 200 by default.
        self._reward_in_goal = 200

        #: Terminate the episode when the goal is achieved (see SimpleIntersectionEnv.goal_achieved)
        self._terminate_in_goal = True

        #: info for displaying on top of ego
        self.ego_info_text = ""

        #: the number of the other vehicles, initialized in generate_scenario
        self.n_other_vehs = None

        #: the list of the vehicles, initialized in generate_scenario
        self.vehs = list()

        #: the vehicle_network for render, initialized in generate_scenario
        self.vehicle_network = None

        #: the index of the vehicle immediately ahead from the ego,
        #  initialized in generate_scenario when veh_ahead_scenario = True
        self.veh_ahead_index = None

        #: the index of the vehicle always stopped after intersection
        # on the same lane as the ego's, initialized in generate_scenario
        # when stopped_car_scenario = True
        self.stopped_veh_index = None

        #: the list of indices of the vehicles stopped in the stop region,
        #  initialized in generate_scenario when n_others_stopped_in_stop_region >= 1
        self.stopped_veh_in_stop_region_indices = list()

        #: a collision btw the other vehicles happened
        self.__others_collision_happened = False
        #: a collision of the ego happened
        self.__ego_collision_happened = False
        #: A boolean representing whether the ego is driving right (False) or in reverse (True).
        self.__ego_theta_out_of_range = False

        #: the reference velocity that the ego tries to follow
        # (= rd.speed_limit by default).
        self.v_ref = rd.speed_limit

        #: the target_lane (0: left, 1: right) of the ego to follow
        # (it will be set to the ego's lane by default)
        self.target_lane = 0

        # Save the scenario option to reuse it in reset
        self.scenario_option = kwargs

        self.generate_scenario(**kwargs)

    @property
    def ego(self):
        """The ego vehicle (self.vehs[EGO_INDEX])."""
        return self.vehs[EGO_INDEX]

    def generate_scenario(self, **kwargs):
        """Randomly generate a road scenario with

            "the N-number of vehicles + an ego vehicle"

        (n_others_range[0] <= N <= n_others_range[1]),
        all of whose speeds are also set randomly (or deterministically depending
        on the options) between 0 and the speed limit of the road environment.
        The position and speed of the vehicles are set (some of them randomly) but
        not in a way that causes inevitable collision or violation. The ego vehicle is
        always posed on a horizontal lane and there is initially no or only one
        vehicle posed in the intersection.

        TODO: More debugging
        TODO: Add more comments for each block within the function for comprehensive understanding.
        TODO: restructure the code by making private method for each part for a certain option.

        Args:
            n_others_range: a tuple (n1, n2), which gives the range of the vehicles
                on the road, excluding the ego vehicle, by n1 <= N <= n2.
            ego_pos_range: a tuple or list (x1, x2); the ego's x-position
                will be sampled uniformly in [x1, x2].
            ego_v_upper_lim: the ego's velocity limit. the ego's speed
                will be sampled uniformly in [0, ego_v_upper_lim]
            ego_lane: the lane in which ego will be initialized
            ego_perturb_lim: a tuple (da, dtheta), where da is the absolute maximum
                of the ego's initial perturbations along the y-axis from the lane
                centre; dtheta is the absolute maximum of the ego's heading angle
                perturbation from the horizontal line (the angle parallel to the
                horizontal lane). The perturbation along the y-axis is
                sampled from

                    "uniform(-ego_perturb_lim[0], ego_perturb_lim[0])"

                within the lane boundary. Similarly, the perturbation of the
                heading angle is sampled from

                    "uniform(-ego_perturb_lim[1], ego_perturb_lim[1])."

                da is restricted by the width of the road, and dtheta the non-
                holonomic constraint;
            ego_heading_towards_lane_centre: If True, then the ego's heading angle is
                always initialized towards the lane centre.
            others_h_pos_range: the range of the other vehicles that are on the
                horizontal route and are not corresponding to the vehicle
                generated by the code for a special scenario below.
            others_v_pos_range: the range of the other vehicles that are on the
                vertical route and initially not stopped in the stop region.
            n_others_stopped_in_stop_region: the number of the other vehicles
                that stopped in the stop region, on the other lanes than that
                of the ego. The maximum is 3 (as there are 4-lanes including
                ego's). When this is true, the waited_count of the stopped cars
                (except the ego) in the stop region will be set randomly. This
                option is useful when generating an initial scenario for Wait
                maneuver.
            veh_ahead_scenario: if True, the generated scenario has a vehicle
                ahead of the ego within self.veh_ahead_max. This option is useful
                in generating an initial scenario for e.g., Follow and Pass.
            stopped_car_scenario: if True, the generated scenario contains a
                stopped car located after the intersection but in the same lane as
                ego's TODO: this option is not fully tested and need some debugging.
            allow_vehicles_in_ego_lane: if False, there will be no vehicle in the
                same lane to the ego's. This option is useful when you generate an
                initial scenario like those in Default and Finish maneuvers.
            v_max_multiplier: a constant between 0 and 1, used in resetting the speeds
                of the vehicles if necessary to make sure that at least an input to
                the ego exists to avoid collisions or stop-sign violations. The less
                v_max_multiplier, more conservative scenario is generated (but when
                v_max_multipler = 1, it could generate a scenario where a collision
                is inevitable). 0.9 is a value set by the developer through trial-and-
                error and exhaustive testing.
            randomize_special_scenarios: If True, randomizes scenarios such as
                veh_ahead_scenario, stopped_car_scenario and
                n_others_stopped_in_stop_region

        Returns:
            False if the initial state is Terminal, True if not.
        """

        # Set n_others_range to (1, 1) when testing follow-specific behavior.
        n_others_range = \
            kwargs.setdefault('n_others_range', (0, MAX_NUM_VEHICLES))
        ego_pos_range = \
            kwargs.setdefault('ego_pos_range', (rd.hlanes.start_pos, rd.hlanes.stop_region[0]))
        ego_v_upper_lim = \
            kwargs.setdefault('ego_v_upper_lim', rd.speed_limit)
        ego_lane = \
            kwargs.setdefault('ego_lane', None)
        others_h_pos_range = \
            kwargs.setdefault('others_h_pos_range', rd.hlanes.pos_range)
        others_v_pos_range = \
            kwargs.setdefault('others_v_pos_range', rd.vlanes.pos_range)
        n_others_stopped_in_stop_region = \
            kwargs.setdefault('n_others_stopped_in_stop_region', 0)
        veh_ahead_scenario = \
            kwargs.setdefault('veh_ahead_scenario', False)
        stopped_car_scenario = \
            kwargs.setdefault('stopped_car_scenario', False)
        allow_vehicles_in_ego_lane = \
            kwargs.setdefault('allow_vehicles_in_ego_lane', True)
        v_max_multiplier = \
            kwargs.setdefault('v_max_multiplier', 0.9)
        ego_perturb_lim = \
            kwargs.setdefault('ego_perturb_lim', (0, 0))
        ego_heading_towards_lane_centre = \
            kwargs.setdefault('ego_heading_towards_lane_centre', True)
        randomize_special_scenarios = \
            kwargs.setdefault('randomize_special_scenarios', False)

        if n_others_range[0] < 0:
            raise ValueError(
                "the number of other vehicles has to be non-negative.")

        if n_others_range[1] < n_others_range[0]:
            raise ValueError(
                "the range of the number of other vehicles is wrong.")

        n_others = np.random.randint(n_others_range[0], n_others_range[1] + 1)

        if randomize_special_scenarios and n_others >= 1:
            # stopped_car_scenario = bool(np.random.randint(0, 1)) TODO: this scenario may not work
            n_others_stopped_in_stop_region = np.random.randint(
                0, min(3, n_others - stopped_car_scenario))
            # np.random.randint's upper bound is exclusive, so (0, 2) yields
            # 0 or 1; the previous (0, 1) always yielded 0, i.e., the flag
            # was never randomized to True.
            veh_ahead_scenario = bool(np.random.randint(0, 2))

        if n_others_stopped_in_stop_region > min(
                n_others - stopped_car_scenario, 3):
            raise ValueError(
                "n_others_stopped_in_stop_region has to be less than min(3, n_others_range[0] - stopped_car_scenario)."
            )

        if veh_ahead_scenario > n_others:
            raise ValueError(
                "the minimum number of the other vehicles (n_others_range[0]) has to be at least larger than or equal to 1 for veh_ahead_scenario."
            )

        if not isinstance(veh_ahead_scenario, bool):
            raise ValueError("veh_ahead_scenario has to be of boolean type.")

        if not isinstance(stopped_car_scenario, bool):
            raise ValueError("stopped_car_scenario has to be of boolean type.")

        if not isinstance(allow_vehicles_in_ego_lane, bool):
            raise ValueError(
                "allow_vehicles_in_ego_lane has to be of boolean type.")

        if not (0 <= v_max_multiplier <= 1):
            raise ValueError(
                "v_max_multiplier has to be within the range [0, 1].")

        if ego_v_upper_lim < 0:
            raise ValueError("ego_v_upper_lim has to be non-negative.")

        if ego_lane not in {0, 1} and ego_lane is not None:
            raise ValueError("ego_lane has to be 0 or 1.")

        for perturb_lim in ego_perturb_lim:
            if perturb_lim < 0:
                raise ValueError(
                    "the perturbation term in ego_perturb_lim has to be non-negative."
                )

        if ego_perturb_lim[0] > rd.hlanes.width:
            raise ValueError(
                "the maximum y-perturbation (e.g., ego_perturb_lim[0]) is out-of-range."
            )

        if ego_perturb_lim[1] >= np.pi / 2:
            raise ValueError(
                "the maximum theta-perturbation (i.e., ego_perturb_lim[1]) is out-of-range."
            )

        if not isinstance(ego_heading_towards_lane_centre, bool):
            raise ValueError(
                "ego_heading_not_outwards has to be of boolean type.")

        self.n_other_vehs = n_others

        # a list of vehicles including ego vehicle i = 0 and the others
        # (1 < i <= n_other_vehs <= MAX_NUM_VEHICLES).
        # The ego vehicle i = 0 has to be on the horizontal lane.
        self.vehs.clear()  # Remove the existing vehicles if any.
        self.vehs.append(
            Vehicle('h', v_upper_lim=ego_v_upper_lim, lane=ego_lane))

        ego_lane = self.ego.APs['lane']
        ego_lane_centre = rd.hlanes.centres[int(ego_lane)]

        ego_pos_min = min(ego_pos_range)
        ego_pos_max = max(ego_pos_range)

        others_h_pos_min = min(others_h_pos_range)
        others_h_pos_max = max(others_h_pos_range)

        others_v_pos_min = min(others_v_pos_range)
        others_v_pos_max = max(others_v_pos_range)

        self.ego.x = np.random.uniform(ego_pos_min, ego_pos_max)

        # sample the ego's y-pos within the lane boundary
        # (ego's center will be in the current lane by the
        # inspection above)
        while True:
            self.ego.y = ego_lane_centre + np.random.uniform(
                max(-ego_perturb_lim[0], min(rd.hlanes.sidewalks)),
                min(ego_perturb_lim[0], max(rd.hlanes.sidewalks)))
            sign = np.sign(self.ego.y -
                           rd.hlanes.centres[self.ego.APs['lane']])
            if sign >= 0 and ego_heading_towards_lane_centre:
                self.ego.theta = np.random.uniform(-ego_perturb_lim[1], 0)
            elif sign < 0 and ego_heading_towards_lane_centre:
                self.ego.theta = np.random.uniform(0, ego_perturb_lim[1])
            else:  # if not ego_heading_towards_lane_centre:
                self.ego.theta = np.random.uniform(-ego_perturb_lim[1],
                                                   ego_perturb_lim[1])

            if not self.is_ego_off_road():
                break

        self.ego.init_local_discrete_var()

        # reset v_ref and target_lane to their default values
        self.v_ref = rd.speed_limit
        self.target_lane = self.ego.APs['lane']

        self.init_APs(False)

        # s is the distance that the vehicle (in this case, the ego)
        # has to inevitably drive for complete stop.
        s = calculate_s(self.ego.v)
        s = SimpleIntersectionEnv.__check_init_veh(self.ego, s,
                                                   v_max_multiplier)
        s_seq = [s]

        curr_i = 1
        veh_ahead = None
        if veh_ahead_scenario:
            while True:
                veh_ahead = Vehicle('h', self.ego.APs['lane'], rd.speed_limit)
                V2V_dist = np.random.uniform(self.veh_ahead_min,
                                             self.veh_ahead_max)
                veh_ahead.x = self.ego.x + 2 * VEHICLE_SIZE_HALF[0] + V2V_dist
                veh_ahead.init_local_discrete_var()

                s = calculate_s(veh_ahead.v)
                s = SimpleIntersectionEnv.__check_init_veh(
                    veh_ahead, s, v_max_multiplier)

                self.vehs.append(veh_ahead)
                self.init_APs()
                s_seq.append(s)

                if veh_ahead.APs['in_intersection']:
                    if veh_ahead.APs['intersection_is_clear']:
                        self.init_APs(False)
                        break
                    else:
                        # re-sample: the intersection is already occupied.
                        self.vehs.pop()
                        self.init_APs()
                        s_seq.pop()
                        continue
                else:
                    break

            # slow the ego down until its stopping distance fits in front
            # of the vehicle ahead.
            while V2V_dist + s_seq[curr_i] < s_seq[curr_i -
                                                   1] + self.veh_ahead_min:
                s_seq[curr_i - 1] = SimpleIntersectionEnv.__regulate_veh_speed(
                    self.ego, V2V_dist + s_seq[curr_i] - self.veh_ahead_min,
                    v_max_multiplier)

            curr_i += 1

        if stopped_car_scenario:
            stopped_veh = Vehicle('h', self.ego.APs['lane'], rd.speed_limit)
            if not veh_ahead_scenario:
                min_x_for_stopped_veh = self.ego.x + 2 * VEHICLE_SIZE_HALF[0] + self.veh_ahead_min
            else:
                min_x_for_stopped_veh = veh_ahead.x + 2 * VEHICLE_SIZE_HALF[0] + self.veh_ahead_min

            min_x_for_stopped_veh = min(min_x_for_stopped_veh,
                                        rd.intersection_width_w_offset / 2)

            stopped_veh.x = np.random.uniform(min_x_for_stopped_veh,
                                              rd.hlanes.max_pos)
            stopped_veh.v = 0
            stopped_veh.init_local_discrete_var()

            self.vehs.append(stopped_veh)
            self.init_APs(False)
            s_seq.append(0)

            # regulate the speeds of all preceding vehicles back-to-front.
            for i in range(0, curr_i):
                _, V2V_dist = self.get_V2V_distance(curr_i - i - 1)
                while V2V_dist + s_seq[curr_i -
                                       i] < s_seq[curr_i - i -
                                                  1] + self.veh_ahead_min:
                    s_seq[curr_i - i -
                          1] = SimpleIntersectionEnv.__regulate_veh_speed(
                              self.vehs[curr_i - i - 1], V2V_dist +
                              s_seq[curr_i - i] - self.veh_ahead_min,
                              v_max_multiplier)

            self.init_APs(False)
            curr_i += 1

        # randomly pose the (n_others_stopped_in_stop_region)-number of vehicles,
        # with zero speeds, in stop regions but not on the same lane as the ego's.
        if n_others_stopped_in_stop_region:
            index_flag = [0] * n_others_stopped_in_stop_region

            # 0s <= max_waited_count * DT <= 1s;
            # this is for the random choice of waited_count of the vehicles
            # which initially have stopped in the stop regions.
            max_waited_count = np.random.randint(0, 11)
            for i in range(0, n_others_stopped_in_stop_region):

                # Randomly pick an index j = 0, 1, 2. Remember that 3 is the
                # maximum number of vehicles initially forced to be posed
                # in the stop region with zero speeds, i.e.,
                # n_others_stopped_in_stop_region = 0, 1, 2, or 3
                # (see the inspection above).
                while True:
                    j = np.random.randint(0, n_others_stopped_in_stop_region)
                    # if found an empty intersection, break the loop.
                    if not index_flag[j]:
                        break

                # Now, the stop region corresponding to index_flag[j]-th lane
                # will be occupied.
                index_flag[j] = True

                # NOTE(review): the uniform bounds below are passed as
                # (stop_region[1], stop_region[0]); numpy tolerates reversed
                # bounds, but confirm stop_region ordering.
                if j == 0:  # 'h'orizontal lane
                    self.vehs.append(Vehicle('h', not self.ego.APs['lane'], 0))
                    self.vehs[curr_i + i].x = np.random.uniform(
                        rd.hlanes.stop_region[1], rd.hlanes.stop_region[0])
                elif j == 1:  # 'v'ertical left lane
                    self.vehs.append(Vehicle('v', 0, 0))
                    self.vehs[curr_i + i].y = np.random.uniform(
                        rd.hlanes.stop_region[1], rd.hlanes.stop_region[0])
                elif j == 2:  # 'v'ertical right lane
                    self.vehs.append(Vehicle('v', 1, 0))
                    self.vehs[curr_i + i].y = np.random.uniform(
                        rd.hlanes.stop_region[1], rd.hlanes.stop_region[0])
                else:
                    raise RuntimeError("Out of the index.")

                # update the maximum waited_count wrt the vehicles
                # generated at the previous step.
                if i > 0:
                    max_waited_count = max(
                        max_waited_count,
                        self.vehs[curr_i + i - 1].waited_count)

                self.vehs[curr_i + i].init_local_discrete_var()
                # randomly put waited_count from 0 to 21 + max_waited_count
                self.vehs[curr_i + i].waited_count = np.random.randint(
                    0, max_waited_count + 21)
                s_seq.append(0)  # s is zero as the vehicle has zero velocity.

            # Now, it successfully put the vehicles
            # on the stop regions.
            curr_i += n_others_stopped_in_stop_region
            self.init_APs(False)

        # pose the remaining vehicles at random, rejecting any placement
        # that causes a collision or an over-occupied intersection.
        for i in range(curr_i, n_others + 1):
            success_flag = False
            while not success_flag:
                self.vehs.append(
                    # use '== 0' (not 'is 0'): identity comparison with an
                    # int literal is unreliable and a SyntaxWarning.
                    Vehicle('h' if np.random.randint(0, 2) == 0 else 'v',
                            np.random.randint(0, 2), rd.speed_limit))

                if self.vehs[i].which_dir == 'h':
                    self.vehs[i].x = np.random.uniform(others_h_pos_min,
                                                       others_h_pos_max)
                elif self.vehs[i].which_dir == 'v':
                    self.vehs[i].y = np.random.uniform(others_v_pos_min,
                                                       others_v_pos_max)
                else:
                    raise ValueError(
                        "The Vehicle class constructor generates a vehicle which is neither 'h'- or 'v'-type."
                    )

                self.vehs[i].init_local_discrete_var()
                self.init_APs(False)

                if ((self.vehs[i].APs['lane'] == self.ego.APs['lane']) and (allow_vehicles_in_ego_lane is False)) or \
                   (self.vehs[i].APs['in_intersection'] and not self.vehs[i].APs['intersection_is_clear']):
                    self.vehs.pop()
                    self.init_APs(False)
                    continue

                if self.vehs[i].APs['in_intersection']:
                    for veh in self.vehs:
                        # making every highest priority False as it is possible that one of them in the stop region is True.
                        veh.APs['highest_priority'] = False
                    self.vehs[i].APs['highest_priority'] = True

                s = calculate_s(self.vehs[i].v)
                s = self.__check_init_veh(self.vehs[i], s, v_max_multiplier)
                s_seq.append(s)
                self.init_APs(False)

                for j in range(0, i + 1):
                    k, V2V_dist = self.get_V2V_distance(j)
                    if k is not None:
                        if V2V_dist < 0 or \
                                (V2V_dist + s_seq[k] < s_seq[j] + self.veh_ahead_min):
                            self.vehs.pop()
                            s_seq.pop()
                            self.init_APs(False)
                            break
                    if j == i:
                        success_flag = True
                        break

        # Finally checks the constraints that every vehicle has to satisfy, and if not satisfied, modify the speeds.
        success_flag = False
        while not success_flag:
            success_flag = True
            for i, veh in enumerate(self.vehs):
                k, V2V_dist = self.get_V2V_distance(i)
                if k is None:
                    continue
                elif V2V_dist + s_seq[k] < s_seq[i] + self.veh_ahead_min:
                    s_seq[i] = SimpleIntersectionEnv.__regulate_veh_speed(
                        veh, V2V_dist + s_seq[k] - self.veh_ahead_min,
                        v_max_multiplier)
                    success_flag = False

        self.init_APs(False)

        # Perform the permutations among the vehs in self.vehs (give random indices for the vehs generated above)
        other_vehs_indices = np.random.permutation(n_others) + 1
        self.veh_ahead_index = other_vehs_indices[
            0] if veh_ahead_scenario else None
        self.stopped_veh_index = other_vehs_indices[
            veh_ahead_scenario] if stopped_car_scenario else None

        if n_others_stopped_in_stop_region:
            start_index = veh_ahead_scenario + stopped_car_scenario
            self.stopped_veh_in_stop_region_indices = other_vehs_indices[
                start_index:start_index + n_others_stopped_in_stop_region]

        other_vehs = []
        for i in other_vehs_indices:
            other_vehs.append(self.vehs[i])

        self.vehs[1:] = other_vehs

        # create the vehicle network for graphical output
        self.vehicle_network = VehicleNetworkCross('car_agent.png', self)

        # Reset the variables related to model- and collision-checking and termination.
        self._reset_model_checker(self.ego.APs)
        self._check_collisions()
        self._check_ego_theta_out_of_range()

        return not self.termination_condition

    @staticmethod
    def __regulate_veh_speed(veh, dist, v_max_multiplier):
        """Assign the vehicle a randomized speed low enough that its full
        braking distance never exceeds the given distance.

        Args:
            veh: the vehicle reference.
            dist: the distance over which the vehicle has to not
                drive for complete stop.
            v_max_multiplier: the same as that in generate_scenario.

        Returns:
            the distance traveled when the vehicle is under
            the maximum de-acceleration to stop completely.
        """
        # Pick a speed uniformly below the largest speed that can still
        # brake to a stop within 'dist', scaled by the multiplier.
        speed_upper_bound = calculate_v_max(dist)
        veh.v = v_max_multiplier * np.random.uniform(0, speed_upper_bound)

        # Re-validate the vehicle with its new braking distance.
        return SimpleIntersectionEnv.__check_init_veh(
            veh, calculate_s(veh.v), v_max_multiplier)

    @staticmethod
    def __check_init_veh(veh, s, v_max_multiplier):
        """Check a given initialized vehicle (self.vehs[i]) and modify its
        speed (v) if the vehicle is physically not able to stop at the stop
        region. This is internally called by the method generate_scenario.

        Args:
            veh: the vehicle reference.
            s: the distance traveling when the vehicle is under the maximum
                de-acceleration to stop completely.
            v_max_multiplier: the same as that in generate_scenario.

        Returns:
            the same s as the input param s, but recomputed whenever the
            speed is modified.

        Raises:
            AssertionError: if veh.which_dir is neither 'h' nor 'v'.
        """
        veh.init_local_discrete_var()
        dist_before_stop_region = None

        # '==' replaces fragile identity comparison ('is') on string literals.
        if veh.which_dir == 'h':
            if veh.APs['in_stop_region']:
                dist_before_stop_region = rd.hlanes.stop_region[1] - veh.x

            elif (not veh.APs['has_entered_stop_region']) and \
                 (rd.hlanes.stop_region[0] - veh.x <= 2.0 * s):
                dist_before_stop_region = rd.hlanes.stop_region[0] - veh.x

            elif veh.APs['in_intersection']:
                v_max = calculate_v_max(veh.x - rd.hlanes.stop_region[0])
                veh.v = np.random.uniform(0, v_max)
                s = calculate_s(veh.v)

        elif veh.which_dir == 'v':
            if veh.APs['in_stop_region']:
                dist_before_stop_region = rd.vlanes.stop_region[1] - veh.y

            # Bug fix: this was a plain 'if', which could overwrite the
            # in_stop_region distance computed just above (the 'h' branch
            # correctly uses 'elif').
            elif (not veh.APs['has_entered_stop_region']) and \
                 (rd.vlanes.stop_region[0] - veh.y <= 2.0 * s):
                dist_before_stop_region = rd.vlanes.stop_region[0] - veh.y

            elif veh.APs['in_intersection']:
                # Bug fix: the distance past the stop region is measured from
                # the vehicle's position (veh.y), not its speed (veh.v) —
                # mirrors the 'h' branch, which uses veh.x.
                v_max = calculate_v_max(veh.y - rd.vlanes.stop_region[0])
                veh.v = np.random.uniform(0, v_max)
                s = calculate_s(veh.v)

        else:
            raise AssertionError(
                "which_dir in Vehicle class has to be either 'h' or 'v'.")

        if dist_before_stop_region is not None:
            v_max = calculate_v_max(
                dist_before_stop_region) if dist_before_stop_region > 0 else 0
            if v_max < veh.v:
                veh.v = v_max_multiplier * np.random.uniform(0, v_max)
                s = calculate_s(veh.v)

        veh.init_local_discrete_var()

        return s

    def init_APs(self, update_local_APs=True):
        """Initialize the global (and local if update_local_APs is true) atomic
        propositions (APs) of each vehicle using the current state. This method
        is basically called by the "generate_scenario" method. Compared with
        update_APs, the way of setting 'highest_priority' is different:
        exactly one vehicle in the intersection — or, if none is there, the
        vehicle that has waited longest — gets the highest priority.

        Args:
            update_local_APs: True if the local APs are also necessary to
                be initialized; False if not, and only the global APs are
                required to be initialized.
        """

        if update_local_APs is True:
            for veh in self.vehs:
                veh.update_local_APs()

        self.update_APs(False, False)

        index = None
        for i, veh in enumerate(self.vehs):
            if veh.APs['in_intersection']:
                if index is not None:
                    # If there are multiple cars in the intersection,
                    # then choose only one of them (the first one found,
                    # above) to keep the highest priority.
                    # Consistency fix: APs are booleans elsewhere in this
                    # class, so use False/True instead of 0/1.
                    veh.APs['highest_priority'] = False
                else:
                    index = i
                    veh.APs['highest_priority'] = True
            else:
                veh.APs['highest_priority'] = False

        if index is not None:
            return

        # No vehicle in the intersection: the longest-waiting one wins.
        waited_count = -1
        for i, veh in enumerate(self.vehs):
            if waited_count < veh.waited_count:
                waited_count = veh.waited_count
                index = i

        if index is not None:
            self.vehs[index].APs['highest_priority'] = True

    def update_APs(self, update_local_APs=True, update_highest_priority=True):
        """Update the global (and local if update_local_APs is true) atomic
        propositions (APs) of each vehicle using the current state. This method
        is basically called by "step" method below after it updates the
        vehicle's continuous state. By a global AP, it means that the state
        information of the other cars has to be used to update the AP (whereas
        a local AP means that the information of the other cars is not
        necessary). For comparison and more details, see also "updateLocalAPs"
        method in vehicles module.

        Args:
            update_local_APs: True if the local APs are also necessary to
                be updated; False if not, and only the global APs are required
                to be updated.
            update_highest_priority: True if the 'highest_priority' AP has to
                be reassigned as well.

        Raises:
            ValueError: if more than one vehicle ends up with
                'highest_priority' set to True.
        """

        if update_local_APs is True:
            for veh in self.vehs:
                veh.update_local_APs()

        for i, veh in enumerate(self.vehs):
            # The intersection is clear for veh iff no other vehicle is in it.
            veh.APs['intersection_is_clear'] = True
            for j in range(0, len(self.vehs)):
                # Idiom fix: '!=' (value comparison) instead of 'is not';
                # int identity is a CPython implementation detail.
                if j != i:
                    veh.APs['intersection_is_clear'] &= \
                        not self.vehs[j].APs['in_intersection']

            if i == 0:
                veh.APs['target_lane'] = self.target_lane
            else:
                # Non-ego vehicles simply keep their current lane
                # (veh is self.vehs[i]).
                veh.APs['target_lane'] = veh.APs['lane']

            j, V2V_dist = self.get_V2V_distance(i)

            if j is None:
                veh.APs['veh_ahead'] = False
            else:
                # The comparison already yields a bool; the redundant
                # 'True if ... else False' is removed.
                veh.APs['veh_ahead'] = 0 < V2V_dist <= self.veh_ahead_max

            if not veh.APs['veh_ahead']:
                veh.APs['veh_ahead_stopped_now'] = False
                veh.APs['veh_ahead_too_close'] = False
            else:
                veh.APs['veh_ahead_stopped_now'] = self.vehs[j].APs[
                    'stopped_now']
                veh.APs['veh_ahead_too_close'] = \
                    0 < V2V_dist <= self.veh_ahead_min

        if update_highest_priority:
            waited_count = -1
            index = None
            for i, veh in enumerate(self.vehs):
                if veh.APs['highest_priority']:
                    if veh.APs['in_stop_region'] or veh.APs['in_intersection']:
                        # The current holder keeps the priority while it is
                        # still in the stop region or the intersection.
                        return
                    else:
                        # Flag off the AP when the car gets out of the intersection.
                        veh.APs['highest_priority'] = False

                elif waited_count < veh.waited_count:
                    waited_count = veh.waited_count
                    index = i

            if (waited_count != -1) and (index is not None):
                self.vehs[index].APs['highest_priority'] = True

            # Sanity check: at most one vehicle may hold highest_priority.
            exists_highest_priority = False
            for veh in self.vehs:
                if veh.APs['highest_priority']:
                    if exists_highest_priority:
                        # Message fix: the old text stated the invariant
                        # instead of describing the violation.
                        raise ValueError(
                            "The AP highest_priority must be true for at most one vehicle at a time."
                        )
                    else:
                        exists_highest_priority = True

    def get_V2V_distance(self, veh_index=0):
        """Calculate the distance between the target and the nearest adjacent
        car in the same lane. The distance is given as a bumper-to-bumper one,
        so being it not positive means that the two cars collide.

        Args:
            veh_index: the index of the vehicle whose V2V distance is
                returned, ranging from 0 to len(self.vehs) - 1
                (with 0 for the index of the ego).

        Returns:
            a tuple (i, V2Vdist), where V2Vdist is the V2V distance
            from the indexed vehicle to the closest vehicle, with its index i,
            on the same route; i is None and V2Vdist is infinite
            when there is no vehicle ahead.

        Raises:
            ValueError: if veh_index is out of range.
        """
        # Direct bounds check instead of the O(n) 'in range(...)' membership.
        if not 0 <= veh_index < len(self.vehs):
            raise ValueError(
                "veh_index in SimpleIntersectionEnv.get_V2V_distance out of index"
            )

        tar_veh = self.vehs[veh_index]

        # np.inf: 'np.infty' is a deprecated alias removed in NumPy 2.0.
        V2V_dist = np.inf
        index = None
        for i, veh in enumerate(self.vehs):
            if i == veh_index:
                continue
            # Only vehicles on the same route and in the same lane count;
            # '==' replaces fragile identity ('is') comparisons.
            elif (veh.which_dir == tar_veh.which_dir) and \
                 (veh.APs['lane'] == tar_veh.APs['lane']):
                if (tar_veh.which_dir == 'h') and \
                   (0.0 <= veh.x - tar_veh.x < V2V_dist):
                    V2V_dist = veh.x - tar_veh.x
                    index = i
                elif (tar_veh.which_dir == 'v') and \
                     (0.0 <= veh.y - tar_veh.y < V2V_dist):
                    V2V_dist = veh.y - tar_veh.y
                    index = i

        # Convert the centre-to-centre distance into bumper-to-bumper.
        return index, V2V_dist - 2.0 * VEHICLE_SIZE_HALF[0]

    def aggressive_driving_policy(self, veh_index, maneuver_opt=None):
        """Implementation of the aggressive driving policy. Calculate the
        (veh_index)-th vehicle's low-level action (a, dot_phi) using aggressive
        driving policy. If using aggressive driving policy for all vehicles,
        there is [has to be] no collision.

        Args:
            veh_index: the index of the vehicle to calculate the action
                (a, dot_phi) via aggressive driving policy.
            maneuver_opt: an option that specifies a maneuver to be executed; it can
                be 'stop', 'wait', etc.

        Returns:
            current low-level action (a, dot_phi) generated by the
            aggressive driving policy for the current state.
        """
        assert 0 <= veh_index <= len(self.vehs) - 1

        tar_veh = self.vehs[veh_index]

        i, V2V_dist = self.get_V2V_distance(veh_index)
        s = calculate_s(tar_veh.v)

        # Throughout, '==' replaces identity comparison ('is') on the
        # 'h'/'v' string literals, which only works by CPython interning.
        if i is not None and (V2V_dist + calculate_s(self.vehs[i].v) <
                              s + self.V2V_ref):
            a = -MAX_ACCELERATION  # emergency stop

        elif i is not None and (V2V_dist <= self.V2V_ref + 6) and \
             ((not tar_veh.APs['has_entered_stop_region'] and
              (tar_veh.which_dir == 'h' and rd.hlanes.stop_region_centre - tar_veh.x > 2 * s or
               tar_veh.which_dir == 'v' and rd.vlanes.stop_region_centre - tar_veh.y > 2 * s)) or
              (tar_veh.APs['has_entered_stop_region'] and not tar_veh.APs['in_stop_region'])):
            # Car-following controller tracking the V2V reference distance.
            a = 31.6228 * (V2V_dist - self.V2V_ref) - 7.9527 * (
                tar_veh.v - self.vehs[i].v) + self.vehs[i].acc

        elif tar_veh.APs['in_stop_region'] and \
                tar_veh.APs['highest_priority'] and \
                tar_veh.APs['intersection_is_clear'] and \
                maneuver_opt != 'stop':
            # Cleared to cross: drive back up towards the speed limit.
            a = -100.0 * (tar_veh.v - rd.speed_limit)

        elif tar_veh.APs['in_stop_region'] and \
                not (tar_veh.APs['highest_priority'] and
                     tar_veh.APs['intersection_is_clear']):
            # Must wait: brake to a complete stop inside the stop region.
            a = -2 * 7.9527 * tar_veh.v - 0.5 * np.clip(20 * tar_veh.v, -1, 1)

        # NOTE(review): 'and' binds tighter than 'or' here, so the
        # 'not has_entered_stop_region' guard applies only to the 'h' case
        # of the condition below — verify the missing outer parentheses
        # around the 'h'/'v' disjunction is intended.
        elif (not tar_veh.APs['has_entered_stop_region']) and \
             ((tar_veh.which_dir == 'h') and (0 <= rd.hlanes.stop_region[1] - tar_veh.x <= 3 * s)) or \
             ((tar_veh.which_dir == 'v') and (0 <= rd.vlanes.stop_region[1] - tar_veh.y <= 3 * s)):
            # Approaching the stop region: PD control towards its centre.
            if tar_veh.which_dir == 'h':
                a = -31.6228 * (rd.hlanes.stop_region_centre -
                                tar_veh.x) - 7.9527 * tar_veh.v
            else:
                a = -31.6228 * (rd.vlanes.stop_region_centre -
                                tar_veh.y) - 7.9527 * tar_veh.v
        else:
            # Free road: track the speed limit.
            a = -10.0 * (tar_veh.v - rd.speed_limit)

        return np.clip(a, -MAX_ACCELERATION, MAX_ACCELERATION), 0

    def _check_ego_theta_out_of_range(self):
        """Record whether the ego's heading angle exceeds the allowed bound."""
        heading_deviation = abs(self.ego.theta)
        self.__ego_theta_out_of_range = heading_deviation > self.max_ego_theta

    def _check_collisions(self):
        """Record the collision status of the ego and of the other vehicles.

        Sets the two private collision flags (read via collision_happened);
        returns nothing.
        """
        ego_crashed = self.check_ego_collision() or self.is_ego_off_road()
        self.__ego_collision_happened = ego_crashed
        self.__others_collision_happened = self.check_other_veh_collisions()

    @property
    def collision_happened(self):
        """True if any collision (ego or non-ego) has been recorded."""
        return (self.__ego_collision_happened
                or self.__others_collision_happened)

    def check_ego_collision(self):
        """Checks if ego collides with any other vehicle in the environment.
        Uses bounding boxes to check if a separating line exists between given
        vehicle and each of the other vehicles.

        Returns:
            True if ego collides with another vehicle, False otherwise.
        """

        current_veh_bb = self.ego.get_bounding_box()
        for i, veh in enumerate(self.vehs):
            # Idiom fix: '==' (value comparison) instead of 'is'; int
            # identity is a CPython implementation detail.
            if i == EGO_INDEX:
                continue
            other_veh_bb = veh.get_bounding_box()
            if bb_utils.does_bounding_box_intersect(current_veh_bb,
                                                    other_veh_bb):
                return True

        return False

    def check_other_veh_collisions(self):
        """Checks if other vehicles other than ego collide in the environment.
        Uses V2V distance for vehicles in same lane, otherwise uses bounding
        boxes. Checks all vehicles other than ego.

        Returns:
            True if a vehicle collides with another vehicle, False otherwise.
        """

        for index in range(len(self.vehs)):
            # Idiom fix: '==' instead of 'is' for int comparison.
            if index == EGO_INDEX:
                continue
            current_veh = self.vehs[index]
            for j in range(index + 1, len(self.vehs)):
                veh = self.vehs[j]
                # Check collision using V2V for other vehicles only in the
                # same lane and travelling in the same direction ('=='
                # replaces fragile identity comparisons on str/AP values).
                if veh.which_dir == current_veh.which_dir and \
                        veh.APs['lane'] == current_veh.APs['lane']:
                    if current_veh.which_dir == 'h':
                        v2v_dist = abs(
                            veh.x - current_veh.x) - 2.0 * VEHICLE_SIZE_HALF[0]
                    else:
                        v2v_dist = abs(
                            veh.y - current_veh.y) - 2.0 * VEHICLE_SIZE_HALF[0]
                    if v2v_dist < 0:
                        # TODO: Remove this print or log this somehow
                        return True
                # Otherwise, check collision using BB only if at intersection.
                elif current_veh.APs['in_intersection']:
                    current_veh_bb = current_veh.get_bounding_box()
                    other_veh_bb = veh.get_bounding_box()
                    if bb_utils.does_bounding_box_intersect(
                            current_veh_bb, other_veh_bb):
                        return True

        return False

    def is_ego_off_road(self):
        """Checks if ego goes off-road.

        The ego is considered off-road only when it is outside the
        boundaries of both the horizontal and the vertical road.

        Returns:
            True if ego goes off-road, False otherwise.
        """

        # check if ego off-road from horizontal road
        h_offroad = not self.ego.is_within_road_boundaries(rd.hlanes)

        # check if ego off-road from vertical road
        v_offroad = not self.ego.is_within_road_boundaries(rd.vlanes)

        # Idiom fix: the redundant 'True if x else False' is removed;
        # the conjunction is already a bool.
        return h_offroad and v_offroad

    def cost(self, u):
        """Calculate the driving cost of the ego, i.e., the negative reward for
        the ego-driving.

        Args:
            u: the low-level input (a, dot_psi) to the ego vehicle.

        Returns:
            cost: the weighted sum of squares of the components in
                the vector "vec." Note that a cost is a negative reward.

        Raises:
            ValueError: if the weight vector and the error vector have
                different lengths.
        """

        # Input clipping (in place, and only when out of range).
        input_limits = (MAX_ACCELERATION, MAX_STEERING_ANGLE_RATE)
        for k in (0, 1):
            if abs(u[k]) > input_limits[k]:
                u[k] = np.clip(u[k], -input_limits[k], input_limits[k])

        y_err = self.ego.y - rd.hlanes.centres[self.target_lane]
        # The target theta error is '0', corresponding to the direction of the horizontal lane.
        # That is, theta_err = self.ego.theta - 'target_theta' = self.ego.theta - 0.
        theta_err = self.ego.theta
        theta_dot_err = self.ego.v * np.tan(self.ego.psi) / VEHICLE_WHEEL_BASE

        # for more details, see the Paxton's paper (2017, IROS)...
        vec = (y_err, theta_err, theta_dot_err, self.ego.v - self.v_ref,
               max(0, self.ego.v - rd.speed_limit), u[0],
               (u[0] - self.ego.acc) / DT, u[1])

        if self.normalize_cost:
            vec = self.normalize_tuple(vec)

        if len(self.cost_weights) != len(vec):
            raise ValueError("the dimension mismatch between weights \
                              and the vector.")

        return np.inner(self.cost_weights, np.square(vec))

    #TODO: Move this to utilities
    def normalize_tuple(self, vec, scale_factor=10):
Ashish Gaurav's avatar
Ashish Gaurav committed
1046 1047 1048
        """Normalizes each element in a tuple according to ranges defined in
        self.cost_normalization_ranges. Normalizes between 0 and 1. And the
        scales by scale_factor.
Aravind Bk's avatar
Aravind Bk committed
1049 1050 1051 1052 1053 1054 1055 1056 1057

        Args:
            vec: The tuple to be normalized
            scale_factor: the scaling factor to multiply the normalized values by

        Returns:
            The normalized tuple
        """
        if len(self.cost_normalization_ranges) != len(vec):
Ashish Gaurav's avatar
Ashish Gaurav committed
1058 1059
            raise ValueError(
                "dimension mismatch between cost_normalization_ranges \
Aravind Bk's avatar
Aravind Bk committed
1060 1061 1062 1063 1064 1065
                              and the vector.")

        normalized_vec = []
        for index, value in enumerate(vec):
            min_value = self.cost_normalization_ranges[index][0]
            max_value = self.cost_normalization_ranges[index][1]
Ashish Gaurav's avatar
Ashish Gaurav committed
1066 1067
            normalized_vec.append(
                ((value - min_value) / (max_value - min_value)) * scale_factor)
Aravind Bk's avatar
Aravind Bk committed
1068 1069 1070 1071

        return tuple(normalized_vec)

    def get_features_tuple(self):
        """Get/calculate the features wrt. the current state variables.

        Returns features tuple
        """
        features = Features(self)
        return features.get_features_tuple()

    def step(self, u_ego):
        """Gym compliant step function.

        Calculate the next continuous and discrete states of every vehicle
        using the given action u_ego for ego-vehicle and aggressive driving
        policy for the other vehicles. The variables in self.vehs will be
        automatically updated. Then, call the options _step and get the reward
        and terminal values.

        Args:
            u_ego: the low level action [a, dot_phi] of the ego vehicle.

        Returns:
            new_state: the new state of the environment
            R: reward obtained
            terminal: whether or not the current policy finished, that is,
            its termination_condition() is satisfied; TODO - also return
            true when the environment's state indicates the episode is
            over.
            info: information variables
        """

        # TODO: remove the line below after debugging output scaling in keras_learner.py.
        u_ego = [2 * u_ego[0], u_ego[1]]  # Rescaling (temporary code).

        reward = -self.cost(u_ego)  # a cost is defined as a negative reward.

        for i, veh in enumerate(self.vehs):
            # For the current version, the ego vehicle also follows the
            # aggressive driving policy. Later, it'll follow a presumably
            # better but complex policy learnt by reinforcement learning.
            #
            # Idiom fix: '==' instead of 'is' for the int comparison.
            if i == 0:  # ego vehicle case,
                veh.step(u_ego)
            else:  # the other vehicle cases,
                veh.step(self.aggressive_driving_policy(i))

        self.update_APs(False)
        self._check_collisions()
        self._check_ego_theta_out_of_range()

        self._set_mc_AP(self.ego.APs)
        _, _, terminal, info = super().step(u_ego)

        if self.termination_condition:
            if self.collision_happened:
                if self.__ego_collision_happened:
                    self._terminal_reward_superposition(
                        -self._penalty_in_ego_collision)
                    # TODO: discussion of how to implement log and gym-compliant info
                    # TODO: Make a detailed log about collision.
                    info['episode_termination_reason'] = "ego_collision"

                else:
                    info[
                        'episode_termination_reason'] = "other_vehicle_collision"

            if self.__ego_theta_out_of_range:
                self._terminal_reward_superposition(
                    -self._penalty_in_ego_theta_out_of_range)
                info['episode_termination_reason'] = "heading_angle_deviation"

            terminal = True

        # Superpose the terminal reward, if any (the dead
        # 'reward = reward' branch is removed).
        if self._r_terminal is not None:
            reward += self._r_terminal

        return self.get_features_tuple(), reward, terminal, info

    def is_terminal(self):
        """Check if the environment is at a terminal state or not.

        Returns:
            True if the environment has terminated.
        """
        # Model checking violation (only relevant when LTL preconditions
        # are enabled).
        violated = self._LTL_preconditions_enable and \
            self.current_model_checking_result()

        reached_goal = self._terminate_in_goal and self.goal_achieved

        # Refresh the collision and heading flags before reading
        # termination_condition.
        self._check_collisions()
        self._check_ego_theta_out_of_range()
        terminated = self.termination_condition

        return violated or reached_goal or terminated

    @property
    def termination_condition(self):
        """True when the base-class condition holds, the ego heading is out
        of range, or any collision has happened."""
        return (super().termination_condition
                or self.__ego_theta_out_of_range
                or self.collision_happened)

    @property
    def goal_achieved(self):
        """A property from the base class which is True if the goal of the road
        scenario is achieved, otherwise False.

        The goal: the ego has passed the end of the horizontal lane, no
        collision has happened, and the ego speed is within 1.1x the speed
        limit. This property is used in both step of EpisodicEnvBase and the
        implementation of the high-level reinforcement learning and
        execution.
        """
        passed_end = self.ego.x >= rd.hlanes.end_pos
        within_speed = self.ego.v <= 1.1 * rd.speed_limit

        return passed_end and not self.collision_happened and within_speed

    def reset(self):
        """Gym compliant reset function.

        Reinitialize this environment with whatever parameters it was initially
        called with.

        Returns:
            new_state: the environment state after reset
        """
        # Retry until a feasible scenario is generated.
        generated = False
        while not generated:
            generated = self.generate_scenario(**self.scenario_option)

        return self.get_features_tuple()

    def draw(self, info=None):
        """Draw the background, road network and vehicle network. Also show
        information as passed on by info.

        Args:
            info: the information string to be shown on the window
        """
        # Painter's order: background, then roads, then vehicles.
        self.background.draw()
        self.road_network.draw()
        self.vehicle_network.draw(self.ego_info_text)

        # Overlay the optional info text near the top-left corner.
        if info is not None:
            Text(info, x=10, y=NUM_VPIXELS - 20, multiline=True).draw()

    def render(self):
        """Gym compliant render function."""

        if self.window is None:
            self.window = config_Pyglet(pyglet, NUM_HPIXELS, NUM_VPIXELS)

        self.window.clear()
        self.window.switch_to()
        self.window.dispatch_events()

        # Text information about ego vehicle's states.
        # ('intersection_is_clear' was accidentally listed twice; the
        # duplicate entry is removed.)
        info = "Ego Attributes:" + get_APs(
            self, EGO_INDEX, 'in_stop_region',
            'has_entered_stop_region', 'has_stopped_in_stop_region',
            'before_but_close_to_stop_region', 'intersection_is_clear',
            'stopped_now', 'in_intersection', 'over_speed_limit',
            'on_route', 'highest_priority',
            'veh_ahead', 'lane') + \
            get_veh_attributes(self, EGO_INDEX, 'waited_count', 'v', 'acc', 'psi_dot')

        self.draw(info)

        self.window.flip()

    def set_ego_info_text(self, info):
        self.ego_info_text = info

    @property
    def action_space(self):
        """The continuous action box: (acceleration, steering-angle rate)."""
        bound = np.array([MAX_ACCELERATION, MAX_STEERING_ANGLE_RATE])
        return gym.spaces.Box(low=-bound, high=bound)

    @property
    def observation_space(self):
        """An unbounded Box whose dimension matches the feature tuple."""
        n_features = len(self.get_features_tuple())
        float_max = np.finfo(np.float32).max
        return gym.spaces.Box(
            low=-float_max, high=float_max, shape=(n_features, ))

    def copy(self):
        # Make a copy of self.env
        # Assumption: all environments ship with this method
        backup_background = self.background
        backup_road_network = self.road_network
        backup_vehicle_network = self.vehicle_network
        backup_window = self.window
        self.background = None
        self.road_network = None
        self.vehicle_network = None
        self.window = None
Ashish Gaurav's avatar
Ashish Gaurav committed
1274

Aravind Bk's avatar
Aravind Bk committed
1275 1276 1277 1278 1279 1280 1281 1282
        new_env = deepcopy(self)

        self.background = backup_background
        self.road_network = backup_road_network
        self.vehicle_network = backup_vehicle_network
        self.window = backup_window

        return new_env
1283 1284 1285 1286 1287 1288 1289 1290 1291

    @property
    def n_others_with_higher_priority(self):
        n_others_with_higher_priority = 0
        for veh in self.vehs[1:]:
            if veh.waited_count > self.ego.waited_count:
                n_others_with_higher_priority += 1

        return n_others_with_higher_priority