RL: py_pong.py

Author: 魏鹏飞 | Published 2020-04-13 10:45

    Keywords:

    batch_size, learning_rate, gamma, decay_rate, render, rmsprop, policy_forward, policy_backward, eps=1e-5

    py_pong.py
    # From http://karpathy.github.io/2016/05/31/rl/
    """ Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """
    import numpy as np
    import pickle
    import gym
    
    # hyperparameters
    H = 200 # number of hidden layer neurons
    batch_size = 10 # every how many episodes to do a param update?
    learning_rate = 1e-4
    gamma = 0.99 # discount factor for reward
    decay_rate = 0.99 # decay factor for RMSProp leaky sum of grad^2
    resume = False # resume from previous checkpoint?
    test = True # test mode: act greedily (no sampling) and render the scene
    save_file = 'pong_model_bolei.p'
    
    render = test # only render the scene in test mode
    
    # model initialization
    D = 80 * 80 # input dimensionality: 80x80 grid
    if resume:
      model = pickle.load(open(save_file, 'rb'))
    else:
      model = {}
      model['W1'] = np.random.randn(H,D) / np.sqrt(D) # "Xavier" initialization
      model['W2'] = np.random.randn(H) / np.sqrt(H)
    
    grad_buffer = { k : np.zeros_like(v) for k,v in model.items() } # update buffers that add up gradients over a batch
    rmsprop_cache = { k : np.zeros_like(v) for k,v in model.items() } # rmsprop memory
    
    def sigmoid(x):
      return 1.0 / (1.0 + np.exp(-x)) # sigmoid "squashing" function to interval [0,1]
    
    def prepro(I):
      """ prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector """
      I = I[35:195] # crop
      I = I[::2,::2,0] # downsample by factor of 2
      I[I == 144] = 0 # erase background (background type 1)
      I[I == 109] = 0 # erase background (background type 2)
      I[I != 0] = 1 # everything else (paddles, ball) just set to 1
      return I.astype(float).ravel() # np.float was removed in newer NumPy; builtin float is equivalent here
    
    def discount_rewards(r):
      """ take 1D float array of rewards and compute discounted reward """
      discounted_r = np.zeros_like(r)
      running_add = 0
      for t in reversed(range(0, r.size)):
        if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
      return discounted_r
    
    def policy_forward(x):
      h = np.dot(model['W1'], x)
      h[h<0] = 0 # ReLU nonlinearity
      logp = np.dot(model['W2'], h)
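      # shapes: W1 is (H, D) and x is (D,), so h is (H,); W2 is (H,), so logp is a scalar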
      p = sigmoid(logp)
      return p, h # return probability of taking action 2, and hidden state
    
    def policy_backward(eph, epdlogp):
      """ backward pass. (eph is array of intermediate hidden states) """
      dW2 = np.dot(eph.T, epdlogp).ravel()
      dh = np.outer(epdlogp, model['W2'])
      dh[eph <= 0] = 0 # backpro prelu
      dW1 = np.dot(dh.T, epx)
      return {'W1':dW1, 'W2':dW2}
    
    env = gym.make("Pong-v0")
    observation = env.reset()
    prev_x = None # used in computing the difference frame
    xs,hs,dlogps,drs = [],[],[],[]
    running_reward = None
    reward_sum = 0
    episode_number = 0
    while True:
      if render: env.render()
    
      # preprocess the observation, set input to network to be difference image
      cur_x = prepro(observation)
      x = cur_x - prev_x if prev_x is not None else np.zeros(D)
      prev_x = cur_x
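      # the frame difference encodes motion (ball and paddle velocity), which a single static frame cannot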
    
      # forward the policy network and sample an action from the returned probability
      aprob, h = policy_forward(x)
      if test:
        action = 2 if aprob > 0.5 else 3 # act greedily in test mode
      else:
        action = 2 if np.random.uniform() < aprob else 3 # sample from the policy: roll the dice!
    
      # record various intermediates (needed later for backprop)
      xs.append(x) # observation
      hs.append(h) # hidden state
      y = 1 if action == 2 else 0 # a "fake label"
      dlogps.append(y - aprob) # grad that encourages the action that was taken to be taken (see http://cs231n.github.io/neural-networks-2/#losses if confused)
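      # for a Bernoulli output with p = sigmoid(logit), d log P(y) / d logit = y - p; the advantage is applied later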
    
      # step the environment and get new measurements
      observation, reward, done, info = env.step(action)
      reward_sum += reward
    
      drs.append(reward) # record reward (has to be done after we call step() to get reward for previous action)
    
      if done: # an episode finished
        episode_number += 1
    
        # stack together all inputs, hidden states, action gradients, and rewards for this episode
        epx = np.vstack(xs)
        eph = np.vstack(hs)
        epdlogp = np.vstack(dlogps)
        epr = np.vstack(drs)
        xs,hs,dlogps,drs = [],[],[],[] # reset array memory
    
        # compute the discounted reward backwards through time
        discounted_epr = discount_rewards(epr)
        # standardize the rewards to be unit normal (helps control the gradient estimator variance)
        discounted_epr -= np.mean(discounted_epr)
        discounted_epr /= np.std(discounted_epr)
    
        epdlogp *= discounted_epr # modulate the gradient with advantage (PG magic happens right here.)
        grad = policy_backward(eph, epdlogp)
        for k in model: grad_buffer[k] += grad[k] # accumulate grad over batch
    
        # perform rmsprop parameter update every batch_size episodes
        if episode_number % batch_size == 0 and not test:
          for k,v in model.items():
            g = grad_buffer[k] # gradient
            rmsprop_cache[k] = decay_rate * rmsprop_cache[k] + (1 - decay_rate) * g**2
            model[k] += learning_rate * g / (np.sqrt(rmsprop_cache[k]) + 1e-5)
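            # '+=' performs gradient ascent on expected reward; the 1e-5 (eps) term guards against division by zero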
            grad_buffer[k] = np.zeros_like(v) # reset batch gradient buffer
    
        # boring book-keeping
        running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
        print('resetting env. episode reward total was %f. running mean: %f' % (reward_sum, running_reward))
        if episode_number % 100 == 0: pickle.dump(model, open(save_file, 'wb'))
        reward_sum = 0
        observation = env.reset() # reset env
        prev_x = None
    
      if reward != 0: # Pong has either +1 or -1 reward exactly when game ends.
        print('ep %d: game finished, reward: %f' % (episode_number, reward) + ('' if reward == -1 else ' !!!!!!!!'))
    
    
    
    # Results:
    python pg_pong.py
    
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    ep 0: game finished, reward: -1.000000
    resetting env. episode reward total was -21.000000. running mean: -21.000000
    ......
    ......
    ......
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: 1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: 1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: 1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    ep 8: game finished, reward: -1.000000
    resetting env. episode reward total was -18.000000. running mean: -20.922252
    ......
    ......
    ......
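
    As a minimal standalone sketch of evaluation only, assuming a checkpoint saved under the save_file
    name above (pong_model_bolei.p) and the same old-style Gym reset/step API, the pickled weights can be
    loaded and run greedily for one episode:

    # evaluation sketch: load the pickled weights and play a single greedy episode
    import numpy as np
    import pickle
    import gym

    D, H = 80 * 80, 200
    model = pickle.load(open('pong_model_bolei.p', 'rb')) # {'W1': (H, D), 'W2': (H,)}

    def prepro(I):
      """ same preprocessing as in the training script """
      I = I[35:195]
      I = I[::2,::2,0]
      I[I == 144] = 0
      I[I == 109] = 0
      I[I != 0] = 1
      return I.astype(float).ravel()

    env = gym.make("Pong-v0")
    observation = env.reset()
    prev_x, done, reward_sum = None, False, 0.0
    while not done:
      env.render()
      cur_x = prepro(observation)
      x = cur_x - prev_x if prev_x is not None else np.zeros(D)
      prev_x = cur_x
      h = np.maximum(0, np.dot(model['W1'], x))              # ReLU hidden layer
      aprob = 1.0 / (1.0 + np.exp(-np.dot(model['W2'], h)))  # P(action == 2)
      action = 2 if aprob > 0.5 else 3                       # greedy, as in test mode above
      observation, reward, done, info = env.step(action)
      reward_sum += reward
    print('evaluation episode reward: %f' % reward_sum)

    This simply replays the test branch of the main loop without any of the gradient bookkeeping.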
    
    

    At a glance

    [Training and Test demo footage from the original post, omitted here.]

Original link: https://www.haomeiwen.com/subject/oezwmhtx.html