import json


class MetricsLogger:
    """Logs time-series data for an episode, including queues, actions, and rewards."""

    def __init__(self):
        self.episode_logs = []
        self._current_episode_queues = []
        self._current_episode_actions = []
        self._current_episode_rewards = []

    def log_step(self, queues, action, reward):
        """Records the metrics for a single simulation step.

        Args:
            queues (np.ndarray): The current queue lengths.
            action (np.ndarray): The action taken by the agent.
            reward (float): The reward received.
        """
        self._current_episode_queues.append(queues.tolist())
        self._current_episode_actions.append(action.tolist())
        self._current_episode_rewards.append(reward)

    def end_episode(self):
        """Aggregates and stores the logs for the completed episode."""
        if not self._current_episode_queues:
            return  # Avoid saving empty episodes
        log_entry = {
            "queues": self._current_episode_queues,
            "actions": self._current_episode_actions,
            "rewards": self._current_episode_rewards,
        }
        self.episode_logs.append(log_entry)
        # Reset for the next episode
        self._current_episode_queues = []
        self._current_episode_actions = []
        self._current_episode_rewards = []

    def save(self, filepath: str):
        """Saves the logged data to a JSON file.

        Args:
            filepath: The path to the output file.
        """
        with open(filepath, "w") as f:
            json.dump(self.episode_logs, f, indent=4)
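# Minimal usage sketch of the logger in a simulation loop. This is illustrative
# only and not part of the class above: the episode/step counts, the random
# queue and action arrays, the reward formula, and the output filename
# "episode_metrics.json" are placeholder assumptions.
if __name__ == "__main__":
    import numpy as np

    logger = MetricsLogger()
    for _ in range(2):                                    # two illustrative episodes
        for _ in range(5):                                # five steps per episode
            queues = np.random.randint(0, 10, size=3)     # placeholder queue lengths
            action = np.random.rand(3)                    # placeholder action vector
            reward = -float(queues.sum())                 # placeholder reward signal
            logger.log_step(queues, action, reward)
        logger.end_episode()                              # close out the episode's logs
    logger.save("episode_metrics.json")                   # write all episodes to JSON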