# kerasrl_learner.py
from .learner_base import LearnerBase

from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate
from keras.optimizers import Adam
from keras.callbacks import TensorBoard

from rl.agents import DDPGAgent, DQNAgent
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
from rl.policy import GreedyQPolicy, EpsGreedyQPolicy, MaxBoltzmannQPolicy

from rl.callbacks import ModelIntervalCheckpoint

import numpy as np
import copy

class DDPGLearner(LearnerBase):
    def __init__(self,
                 input_shape=(48, ),
                 nb_actions=2,
                 actor=None,
                 critic=None,
                 critic_action_input=None,
                 memory=None,
                 random_process=None,
                 **kwargs):
        """The constructor which sets the properties of the class.

        Args:
            input_shape: Shape of the observation space, e.g. (10,)
            nb_actions: number of values in the action space
            actor: Keras Model of actor which takes observation as input and outputs actions. Uses default if not given
            critic: Keras Model of critic which takes concatenation of observation and action and outputs a single
                value. Uses default if not given
            critic_action_input: Keras Input which was used in creating action input of the critic model.
                Uses default critic and action_input if not specified
            memory: KerasRL Memory. Uses default SequentialMemory if not given
            random_process: KerasRL random process. Uses default OrnsteinUhlenbeckProcess if not given
            **kwargs: other optional key-value arguments with defaults defined in property_defaults
        """
        super(DDPGLearner, self).__init__(input_shape, nb_actions, **kwargs)
        property_defaults = {
            "mem_size": 100000,  # size of memory
            "mem_window_length": 1,  # window length of memory
            "oup_theta": 0.15,  # OrnsteinUhlenbeckProcess theta
            "oup_mu": 0,  # OrnsteinUhlenbeckProcess mu
            "oup_sigma": 1,  # OrnsteinUhlenbeckProcess sigma
            "oup_sigma_min": 0.5,  # OrnsteinUhlenbeckProcess sigma min
            "oup_annealing_steps": 500000,  # OrnsteinUhlenbeckProcess n-step annealing
            "nb_steps_warmup_critic": 100,  # steps for critic to warmup
            "nb_steps_warmup_actor": 100,  # steps for actor to warmup
            "target_model_update": 1e-3  # target model update frequency
        }

        for (prop, default) in property_defaults.items():
            setattr(self, prop, kwargs.get(prop, default))

        if actor is None:
            actor = self.get_default_actor_model()
        if critic is None or critic_action_input is None:
            critic, critic_action_input = self.get_default_critic_model()
        if memory is None:
            memory = self.get_default_memory()
        if random_process is None:
            random_process = self.get_default_randomprocess()

        # TODO: Add output scaling
        self.agent_model = self.create_agent(
            actor, critic, critic_action_input, memory, random_process)

    def get_default_actor_model(self):
        """Creates the default actor model.

        Returns:     Keras Model object of actor
        """
        actor = Sequential()
        actor.add(Flatten(input_shape=(1, ) + self.input_shape))
        actor.add(Dense(64, use_bias=False))
        actor.add(Activation('relu'))
        actor.add(Dense(64, use_bias=False))
        actor.add(Activation('relu'))
        actor.add(Dense(self.nb_actions, use_bias=True))
        actor.add(Activation('tanh'))
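        # tanh bounds each action to [-1, 1]; environments expecting a
        # different range need external output scaling (see the TODO in
        # __init__).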

        # print(actor.summary())

        return actor

    def get_default_critic_model(self):
        """Creates the default critic model.

        Returns:     Tuple of (critic Keras Model, action Keras Input)
        """
        action_input = Input(shape=(self.nb_actions, ), name='action_input')
        observation_input = Input(
            shape=(1, ) + self.input_shape, name='observation_input')
        flattened_observation = Flatten()(observation_input)
        x = Concatenate()([action_input, flattened_observation])
        x = Dense(64, use_bias=False)(x)
        x = Activation('relu')(x)
        x = Dense(64, use_bias=False)(x)
        x = Activation('relu')(x)
        x = Dense(64, use_bias=False)(x)
        x = Activation('relu')(x)
        x = Dense(1, use_bias=True)(x)
        #x = Activation('linear')(x)
        critic = Model(inputs=[action_input, observation_input], outputs=x)
        # print(critic.summary())

        return critic, action_input

    def get_default_randomprocess(self):
        """Creates the default random process model.

        Returns:     KerasRL OrnsteinUhlenbeckProcess object
        """
        random_process = OrnsteinUhlenbeckProcess(
            size=self.nb_actions,
            theta=self.oup_theta,
            mu=self.oup_mu,
            sigma=self.oup_sigma,
            sigma_min=self.oup_sigma_min,
            n_steps_annealing=self.oup_annealing_steps)
        return random_process

    def get_default_memory(self):
        """Creates the default memory model.

        Returns:     KerasRL SequentialMemory object
        """
        memory = SequentialMemory(
            limit=self.mem_size, window_length=self.mem_window_length)
        return memory

    def create_agent(self, actor, critic, critic_action_input, memory,
                     random_process):
        """Creates a KerasRL DDPGAgent with given components.

        Args:
            actor: Keras Model of actor which takes observation as input and outputs actions.
            critic: Keras Model of critic that takes concatenation of observation and action and outputs a single value.
            critic_action_input: Keras Input which was used in creating action input of the critic model.
            memory: KerasRL Memory.
            random_process: KerasRL random process.

        Returns:
            KerasRL DDPGAgent object
        """
        agent = DDPGAgent(
            nb_actions=self.nb_actions,
            actor=actor,
            critic=critic,
            critic_action_input=critic_action_input,
            memory=memory,
            nb_steps_warmup_critic=self.nb_steps_warmup_critic,
            nb_steps_warmup_actor=self.nb_steps_warmup_actor,
            random_process=random_process,
            gamma=self.gamma,
            target_model_update=self.target_model_update)

        # TODO: expose lr_actor and lr_critic params to set different learning rates for the actor and critic.
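        # keras-rl unpacks this optimizer list as (actor_optimizer,
        # critic_optimizer), so the actor here trains with a 100x smaller
        # learning rate than the critic.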
        agent.compile(
            [
                Adam(lr=self.lr * 1e-2, clipnorm=1.),
                Adam(lr=self.lr, clipnorm=1.)
            ],
            metrics=['mae'])

        return agent

    def train(self,
              env,
              nb_steps=1000000,
              visualize=False,
              verbose=1,
              log_interval=10000,
              nb_max_episode_steps=200,
              model_checkpoints=False,
              checkpoint_interval=100000,
              tensorboard=False):

        callbacks = []
        if model_checkpoints:
            callbacks += [
                ModelIntervalCheckpoint(
                    './checkpoints/checkpoint_weights.h5f',
                    interval=checkpoint_interval)
            ]
        if tensorboard:
            callbacks += [TensorBoard(log_dir='./logs')]

        self.agent_model.fit(
            env,
            nb_steps=nb_steps,
            visualize=visualize,
            verbose=verbose,
            log_interval=log_interval,
            nb_max_episode_steps=nb_max_episode_steps,
            callbacks=callbacks)

    def save_model(self, file_name="test_weights.h5f", overwrite=True):
        """Saves the agent's weights to the given file."""
        self.agent_model.save_weights(file_name, overwrite=overwrite)

    def test_model(self,
                   env,
                   nb_episodes=50,
                   callbacks=None,
                   visualize=True,
                   nb_max_episode_steps=200):
        self.agent_model.test(
            env,
            nb_episodes=nb_episodes,
            callbacks=callbacks,
            visualize=visualize,
            nb_max_episode_steps=nb_max_episode_steps)

    def load_model(self, file_name="test_weights.h5f"):
        self.agent_model.load_weights(file_name)

    def predict(self, observation):
        return self.agent_model.forward(observation)
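
# A minimal usage sketch (kept as a comment so importing this module has no
# side effects); assumes a keras-rl-compatible continuous-action `env`:
#
#   learner = DDPGLearner(input_shape=(48,), nb_actions=2)
#   learner.train(env, nb_steps=50000)
#   learner.save_model("ddpg_weights.h5f")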


class DQNLearner(LearnerBase):
    def __init__(self,
                 input_shape=(48, ),
                 nb_actions=5,
                 low_level_policies=None,
                 model=None,
                 policy=None,
                 memory=None,
                 test_policy=None,
                 **kwargs):
        """The constructor which sets the properties of the class.

        Args:
            input_shape: Shape of the observation space, e.g. (10,)
            nb_actions: number of values in the action space
            low_level_policies: dict of low-level policy objects keyed by option alias; options whose initiation condition fails are masked out
            model: Keras Model which takes an observation as input and outputs one Q-value per action. Uses default if not given
            policy: KerasRL Policy. Uses default RestrictedEpsGreedyQPolicy if not given
            memory: KerasRL Memory. Uses default SequentialMemory if not given
            test_policy: KerasRL Policy used during testing. Uses default RestrictedGreedyQPolicy if not given
            **kwargs: other optional key-value arguments with defaults defined in property_defaults
        """
        super(DQNLearner, self).__init__(input_shape, nb_actions, **kwargs)
        property_defaults = {
            "mem_size": 100000,  # size of memory
            "mem_window_length": 1,  # window length of memory
            "target_model_update": 1e-3,  # target model update frequency
            "nb_steps_warmup": 100,  # steps for model to warmup
        }

        for (prop, default) in property_defaults.items():
            setattr(self, prop, kwargs.get(prop, default))

        if model is None:
            model = self.get_default_model()
        if policy is None:
            policy = self.get_default_policy()
        if test_policy is None:
            test_policy = self.get_default_test_policy()
        if memory is None:
            memory = self.get_default_memory()

        self.low_level_policies = low_level_policies

        self.agent_model = self.create_agent(model, policy, memory, test_policy)

    def get_default_model(self):
        """Creates the default model.

        Returns:     Keras Model object of actor
        """
        model = Sequential()
        model.add(Flatten(input_shape=(1, ) + self.input_shape))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(64, activation='tanh'))
        model.add(Dense(self.nb_actions))
        model.summary()  # summary() already prints; wrapping it in print() would also print None

        return model

    def get_default_policy(self):
        """Creates the default training policy.

        Returns:     RestrictedEpsGreedyQPolicy object with eps=0.3
        """
        return RestrictedEpsGreedyQPolicy(0.3)

    def get_default_test_policy(self):
        """Creates the default test policy.

        Returns:     RestrictedGreedyQPolicy object
        """
        return RestrictedGreedyQPolicy()

    def get_default_memory(self):
        """Creates the default memory model.

        Returns:     KerasRL SequentialMemory object
        """
        memory = SequentialMemory(
            limit=self.mem_size, window_length=self.mem_window_length)
        return memory

    def create_agent(self, model, policy, memory, test_policy):
        """Creates a KerasRL DDPGAgent with given components.

        Args:
            model: Keras Model of model which takes observation as input and outputs discrete actions.
            memory: KerasRL Memory.

        Returns:
            KerasRL DQN object
        """
        agent = DQNAgentOverOptions(
            model=model,
            low_level_policies=self.low_level_policies,
            nb_actions=self.nb_actions,
            memory=memory,
            nb_steps_warmup=self.nb_steps_warmup,
            target_model_update=self.target_model_update,
            policy=policy,
            test_policy=test_policy,
            enable_dueling_network=True)

        agent.compile(Adam(lr=self.lr), metrics=['mae'])

        return agent

    def train(self,
              env,
              nb_steps=1000000,
              visualize=False,
              verbose=1,
              log_interval=10000,
              nb_max_episode_steps=200,
              tensorboard=False,
              model_checkpoints=False,
              checkpoint_interval=10000):

        callbacks = []
        if model_checkpoints:
            callbacks += [
                ModelIntervalCheckpoint(
                    './checkpoints/checkpoint_weights.h5f',
                    interval=checkpoint_interval)
            ]
        if tensorboard:
            callbacks += [TensorBoard(log_dir='./logs')]

        self.agent_model.fit(
            env,
            nb_steps=nb_steps,
            visualize=visualize,
            verbose=verbose,
            log_interval=log_interval,
            nb_max_episode_steps=nb_max_episode_steps,
            callbacks=callbacks)

    def save_model(self, file_name="test_weights.h5f", overwrite=True):
        """Saves the agent's weights to the given file."""
        self.agent_model.save_weights(file_name, overwrite=overwrite)

    # TODO: very environment specific. Make it general
    def test_model(self,
                   env,
                   nb_episodes=5,
                   visualize=True,
                   nb_max_episode_steps=400,
                   success_reward_threshold=100):

        print("Testing for {} episodes".format(nb_episodes))
        success_count = 0
        termination_reason_counter = {}
        for n in range(nb_episodes):
            env.reset()
            terminal = False
            step = 0
            episode_reward = 0
            while not terminal and step <= nb_max_episode_steps:
                if visualize:
                    env.render()
                features, R, terminal, info = env.execute_controller_policy()
                step += 1
                episode_reward += R
                if terminal:
                    if 'episode_termination_reason' in info:
                        termination_reason = info['episode_termination_reason']
                        if termination_reason in termination_reason_counter:
                            termination_reason_counter[termination_reason] += 1
                        else:
                            termination_reason_counter[termination_reason] = 1
                    # TODO: remove the env-specific code below
                    if env.env.goal_achieved:
                        success_count += 1
                    env.reset()
                    print("Episode {}: steps:{}, reward:{}".format(
                        n + 1, step, episode_reward))

        print("\nPolicy succeeded {} times!".format(success_count))
        print("Failures due to:")
        print(termination_reason_counter)

        return [success_count, termination_reason_counter]

    def load_model(self, file_name="test_weights.h5f"):
        self.agent_model.load_weights(file_name)

    def predict(self, observation):
        return self.agent_model.forward(observation)

    def get_q_value(self, observation, action):
        return self.agent_model.get_modified_q_values(observation)[action]

    def get_q_value_using_option_alias(self, observation, option_alias):
        action_num = self.agent_model.low_level_policy_aliases.index(
            option_alias)
        return self.agent_model.get_modified_q_values(observation)[action_num]

    def get_softq_value_using_option_alias(self, observation, option_alias):
        action_num = self.agent_model.low_level_policy_aliases.index(
            option_alias)
        q_values = self.agent_model.get_modified_q_values(observation)
        # print('softq q_values are %s' % dict(zip(self.agent_model.low_level_policy_aliases, q_values)))
        # oq_values = copy.copy(q_values)
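        # Numerically stable softmax: shift by max(q) before exponentiating,
        # so P(a) = exp(q_a - max q) / sum_b exp(q_b - max q); options masked
        # to -inf contribute exp(-inf) = 0 to the denominator.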
        if q_values[action_num] == -np.inf:
            return 0
        max_q_value = np.max(q_values)
        q_values = [np.exp(q_value - max_q_value) for q_value in q_values]
        relevant = q_values[action_num] / np.sum(q_values)
        # print('softq: %s -> %s' % (oq_values, relevant))
        return relevant
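
# A minimal usage sketch (kept as a comment so importing this module has no
# side effects); assumes `env` exposes the interface used by test_model above
# (execute_controller_policy, env.goal_achieved) and that `low_level_policies`
# maps option aliases to maneuvers exposing reset_maneuver and
# initiation_condition:
#
#   learner = DQNLearner(input_shape=(48,), nb_actions=5,
#                        low_level_policies=low_level_policies)
#   learner.train(env, nb_steps=200000)
#   success_count, reasons = learner.test_model(env, nb_episodes=10)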


class RestrictedEpsGreedyQPolicy(EpsGreedyQPolicy):
    """Implement the epsilon greedy policy

    Restricted Eps Greedy policy.
    This policy ensures that it never chooses the action whose value is -inf

    """

    def __init__(self, eps=.1):
        super(RestrictedEpsGreedyQPolicy, self).__init__(eps)

    def select_action(self, q_values):
        """Return the selected action

        # Arguments
            q_values (np.ndarray): List of the estimations of Q for each action

        # Returns
            Selected action
        """
        assert q_values.ndim == 1
        nb_actions = q_values.shape[0]
        index = list()

        for i in range(0, nb_actions):
            if q_values[i] != -np.inf:
                index.append(i)

        # Every q_value can be -np.inf: keras-rl's fit and test functions force
        # a call to forward() at the terminal step, which reaches this policy
        # even when no option is valid.
        # TODO: handle this exceptional case more gracefully than defaulting to action 0.
        if len(index) < 1:
            # every q_value is -np.inf, we choose action = 0
            action = 0
            print("Warning: no action satisfies initiation condition, action = 0 is chosen by default.")

        elif np.random.uniform() <= self.eps:
            action = index[np.random.randint(0, len(index))]  # uniform over valid actions

        else:
            action = np.argmax(q_values)

        return action
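
# Example: given q_values = np.array([-np.inf, 0.2, 0.5]), this policy explores
# uniformly over indices {1, 2} with probability eps and otherwise picks
# index 2; index 0 is never selected.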


class RestrictedGreedyQPolicy(GreedyQPolicy):
    """Implement the epsilon greedy policy

    Restricted Greedy policy.
    This policy ensures that it never chooses the action whose value is -inf

    """

    def select_action(self, q_values):
        """Return the selected action

        # Arguments
            q_values (np.ndarray): List of the estimations of Q for each action

        # Returns
            Selected action
        """
        assert q_values.ndim == 1

        # TODO: handle this exceptional case more gracefully than defaulting to action 0.
        if np.max(q_values) == -np.inf:
            # every q_value is -np.inf, we choose action = 0
            action = 0
            print("Warning: no action satisfies initiation condition, action = 0 is chosen by default.")
        else:
            action = np.argmax(q_values)

        return action


class DQNAgentOverOptions(DQNAgent):
    def __init__(self,
                 model,
                 low_level_policies,
                 policy=None,
                 test_policy=None,
                 enable_double_dqn=True,
                 enable_dueling_network=False,
                 dueling_type='avg',
                 *args,
                 **kwargs):
        super(DQNAgentOverOptions, self).__init__(
            model, policy, test_policy, enable_double_dqn,
            enable_dueling_network, dueling_type, *args, **kwargs)

        self.low_level_policies = low_level_policies
        if low_level_policies is not None:
            self.low_level_policy_aliases = list(
                self.low_level_policies.keys())

    def __get_invalid_node_indices(self):
        """Returns a list of option indices that are invalid according to
        initiation conditions."""
        invalid_node_indices = list()
        for index, option_alias in enumerate(self.low_level_policy_aliases):
            # TODO: move reset_maneuver elsewhere; a "get" method should not mutate state.
            self.low_level_policies[option_alias].reset_maneuver()
            if not self.low_level_policies[option_alias].initiation_condition:
                invalid_node_indices.append(index)

        return invalid_node_indices

    def forward(self, observation):
        q_values = self.get_modified_q_values(observation)

        if self.training:
            action = self.policy.select_action(q_values=q_values)
        else:
            action = self.test_policy.select_action(q_values=q_values)

        # Book-keeping.
        self.recent_observation = observation
        self.recent_action = action

        # print('forward gives %s from %s' % (action, dict(zip(self.low_level_policy_aliases, q_values))))
        return action

    def get_modified_q_values(self, observation):
        state = self.memory.get_recent_state(observation)
        q_values = self.compute_q_values(state)

        if self.low_level_policies is not None:
            invalid_node_indices = self.__get_invalid_node_indices()

            for node_index in invalid_node_indices:
                q_values[node_index] = -np.inf

        return q_values
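
# Masking sketch (hypothetical option aliases): if low_level_policies is
# {'keep_lane': p0, 'change_lane': p1} and p1.initiation_condition is False,
# get_modified_q_values returns [q_keep_lane, -inf], so the restricted
# policies above can never select 'change_lane'.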