This is a testbed for evaluating strategies on the multi-armed bandit problem.[^1] The testbed works as follows:
- Generate $10$ reward means $\mu_i \sim \mathcal N(0, 1)$, one for each of $10$ actions $a_i$.
- On each step, let the agent take some action $a_j$ and receive a reward $r_t \sim \mathcal N(\mu_j, 1)$.

We repeat this for $100$ independently sampled sets of $\mu_i$. The agent's goal is to maximize its average reward, so ideally it learns which action has the highest mean and keeps choosing it.
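As a minimal sketch of a single bandit instance (the names `rng`, `mus`, and `pull` here are just illustrative; the full code at the end uses `normal_reward` instead):

```python
import numpy as np

rng = np.random.default_rng()
mus = rng.normal(0, 1, 10)  # one hidden reward mean per action

def pull(j):
    # reward for taking action a_j: a noisy sample around its mean
    return rng.normal(mus[j], 1.0)
```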
Strategy 1: $\epsilon$-greedy
class GreedyEpsilon:
    def __init__(self, n_actions, eps, reward_fn, bias=0.0):
        self.n_actions = n_actions
        self.Q = np.array([bias] * n_actions)  # action-value estimates; bias > 0 gives optimistic initial values
        self.n_moves = np.zeros((n_actions,))
        self.eps = eps
        self.reward_fn = reward_fn
        self.total_reward = 0.0

    def step(self):
        # Explore with probability eps, otherwise exploit the current best estimate.
        if np.random.rand() < self.eps:
            action = np.random.randint(0, self.n_actions)
        else:
            action = np.argmax(self.Q)
        reward = self.reward_fn(action)
        self.n_moves[action] += 1
        # Incremental sample-average update of Q[action].
        self.Q[action] += 1.0 / self.n_moves[action] * (reward - self.Q[action])
        self.total_reward += reward
        return reward
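The update to `self.Q[action]` is the incremental form of a running sample average: if $R_n$ is the $n$-th reward received from an action, then $Q_{n+1} = Q_n + \tfrac{1}{n}(R_n - Q_n)$, which equals the mean of all rewards seen for that action without storing them. As a usage sketch on a single bandit instance (reusing the illustrative `pull` function from above):

```python
agent = GreedyEpsilon(n_actions=10, eps=0.1, reward_fn=pull)
for _ in range(1000):
    agent.step()
print(agent.total_reward / 1000)  # average reward over 1000 pulls
```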
Strategy 2: Cheat the system
We’re not supposed to sample more than one arm per step, but we will anyway to illustrate an upper bound on performance: on every step the cheater pulls all $10$ arms and keeps the best reward.
class CheatingModel:
    def __init__(self, n_actions, eps, reward_fn, bias=0.0):
        # eps and bias are unused; kept for a uniform constructor interface.
        self.n_actions = n_actions
        self.reward_fn = reward_fn
        self.total_reward = 0.0

    def step(self):
        # Pull every arm and keep the best reward seen this step.
        reward = max(self.reward_fn(action) for action in range(self.n_actions))
        self.total_reward += reward
        return reward
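Because it takes the maximum over ten noisy draws each step, the cheater's average reward actually sits above even the best arm's mean. A quick Monte-Carlo sanity check of that gap (purely illustrative, not part of the testbed code):

```python
import numpy as np

rng = np.random.default_rng()
mus = rng.normal(0, 1, 10)
draws = rng.normal(mus, 1.0, size=(100_000, 10))  # one draw per arm per step
print(mus.max())                 # best arm's mean
print(draws.max(axis=1).mean())  # cheater's average per-step reward
```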
Results
Full Code (matplotlib version)
import numpy as np
from tqdm import trange
import matplotlib.pyplot as plt


def normal_reward(action, action_to_reward_mu, reward_std, n_samples):
    samples = np.random.normal(action_to_reward_mu[action], reward_std, n_samples)
    # Return a plain scalar when only one sample is requested.
    return samples.item() if n_samples == 1 else samples


class CheatingModel:
    def __init__(self, n_actions, eps, reward_fn, bias=0.0):
        # eps and bias are unused; kept for a uniform constructor interface.
        self.n_actions = n_actions
        self.reward_fn = reward_fn
        self.total_reward = 0.0

    def step(self):
        # Pull every arm and keep the best reward seen this step.
        reward = max(self.reward_fn(action) for action in range(self.n_actions))
        self.total_reward += reward
        return reward


class GreedyEpsilon:
    def __init__(self, n_actions, eps, reward_fn, bias=0.0):
        self.n_actions = n_actions
        self.Q = np.array([bias] * n_actions)  # action-value estimates; bias > 0 gives optimistic initial values
        self.n_moves = np.zeros((n_actions,))
        self.eps = eps
        self.reward_fn = reward_fn
        self.total_reward = 0.0

    def step(self):
        # Explore with probability eps, otherwise exploit the current best estimate.
        if np.random.rand() < self.eps:
            action = np.random.randint(0, self.n_actions)
        else:
            action = np.argmax(self.Q)
        reward = self.reward_fn(action)
        self.n_moves[action] += 1
        # Incremental sample-average update of Q[action].
        self.Q[action] += 1.0 / self.n_moves[action] * (reward - self.Q[action])
        self.total_reward += reward
        return reward


def main():
    N_ACTIONS = 10
    N_DISTRIBUTIONS = 100
    reward_std = 1.0
    n_steps = 2000

    epsilon_values = [0.0, 0.01, 0.1, 0.2]
    avg_rewards = {epsilon: np.zeros((n_steps,)) for epsilon in epsilon_values}
    avg_rewards["cheating"] = np.zeros((n_steps,))

    bias_values = [0.0, 0.5, 1.0]  # optimistic initial Q-values
    avg_rewards_bias = {bias: np.zeros((n_steps,)) for bias in bias_values}

    for _ in trange(N_DISTRIBUTIONS):
        # Sample a fresh bandit instance: one reward mean per action.
        action_to_reward_mu = np.random.normal(0, 1, (N_ACTIONS,))

        for epsilon in epsilon_values:
            model = GreedyEpsilon(
                N_ACTIONS,
                epsilon,
                lambda a: normal_reward(a, action_to_reward_mu, reward_std, 1),
            )
            for n in range(n_steps):
                model.step()
                avg_rewards[epsilon][n] += model.total_reward / (n + 1)

        cheating_model = CheatingModel(
            N_ACTIONS,
            0,
            lambda a: normal_reward(a, action_to_reward_mu, reward_std, 1),
        )
        for n in range(n_steps):
            cheating_model.step()
            avg_rewards["cheating"][n] += cheating_model.total_reward / (n + 1)

        for bias in bias_values:
            biased_model = GreedyEpsilon(
                N_ACTIONS,
                0.01,
                lambda a: normal_reward(a, action_to_reward_mu, reward_std, 1),
                bias=bias,
            )
            for n in range(n_steps):
                biased_model.step()
                avg_rewards_bias[bias][n] += biased_model.total_reward / (n + 1)

    # Average the accumulated curves over all sampled bandit instances.
    for epsilon in epsilon_values:
        avg_rewards[epsilon] /= N_DISTRIBUTIONS
    avg_rewards["cheating"] /= N_DISTRIBUTIONS
    for bias in bias_values:
        avg_rewards_bias[bias] /= N_DISTRIBUTIONS

    plt.figure(figsize=(12, 6))
    plt.subplot(1, 2, 1)
    for epsilon in epsilon_values:
        plt.plot(avg_rewards[epsilon], label=f"epsilon={epsilon}")
    plt.plot(avg_rewards["cheating"], label="cheating model", linestyle="--")
    plt.legend()
    plt.yscale("log")
    plt.title("Epsilon Results")

    plt.subplot(1, 2, 2)
    for bias in bias_values:
        plt.plot(avg_rewards_bias[bias], label=f"bias={bias}")
    plt.plot(avg_rewards["cheating"], label="cheating model", linestyle="--")
    plt.legend()
    plt.yscale("log")
    plt.title("Bias Results with Epsilon=0.01")

    plt.tight_layout()
    plt.show()


if __name__ == "__main__":
    main()
[^1]: *Reinforcement Learning: An Introduction*, Sutton and Barto.