diff --git a/MetaAugment/autoaugment_learners/aa_learner.py b/MetaAugment/autoaugment_learners/aa_learner.py
index 665a5b3a9fa2719f5933d808744010a9aa41ebe0..950b7e11b8223b22208414594176b7a2911501dd 100644
--- a/MetaAugment/autoaugment_learners/aa_learner.py
+++ b/MetaAugment/autoaugment_learners/aa_learner.py
@@ -403,4 +403,26 @@ class aa_learner:
 
         self.num_pols_tested += 1
         self.history.append((policy,accuracy))
-        return accuracy
\ No newline at end of file
+        return accuracy
+
+
+    def get_mega_policy(self, number_policies):
+        """
+        Produces a mega policy from the n best subpolicies (evo learner) or
+        policies (other learners) found so far.
+
+        Args:
+            number_policies -> int: Number of (sub)policies to include in the
+                mega policy
+
+        Returns:
+            megapol -> [subpolicy, subpolicy, ...]: the selected (sub)policies
+                concatenated into a single list
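+
+        Example (illustrative, hypothetical policy names):
+            If self.history were [(pol_a, 0.91), (pol_b, 0.88), (pol_c, 0.75)],
+            get_mega_policy(2) would return the (sub)policies of pol_a and pol_b
+            concatenated into a single list.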
+        """
+        inter_pol = sorted(self.history, key=lambda x: x[1], reverse=True)[:number_policies]
+
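+        # Concatenate the selected (sub)policies into one flat list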
+        megapol = []
+        for pol in inter_pol:
+            megapol += pol[0]
+
+        return megapol
diff --git a/MetaAugment/autoaugment_learners/evo_learner.py b/MetaAugment/autoaugment_learners/evo_learner.py
index 22ed9811ba4fb28e931e232c07bb3c5f2fa3980e..a7dc580d2c1942993848f6022d9b04bcca6718a2 100644
--- a/MetaAugment/autoaugment_learners/evo_learner.py
+++ b/MetaAugment/autoaugment_learners/evo_learner.py
@@ -221,21 +221,21 @@ class evo_learner(aa_learner):
             self.train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=self.batch_size)
 
             for idx, (test_x, label_x) in enumerate(self.train_loader):
-                full_policy = self._get_single_policy_cov(test_x)
+                sub_pol = self._get_single_policy_cov(test_x)
 
 
-                while self._in_pol_dict(full_policy):
-                    full_policy = self._get_single_policy_cov(test_x)[0]
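+                # Re-sample while the candidate subpolicy has already been evaluated (_in_pol_dict check)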
+                while self._in_pol_dict(sub_pol):
+                    sub_pol = self._get_single_policy_cov(test_x)[0]
 
 
-            fit_val = self._test_autoaugment_policy(full_policy,child_network_architecture,train_dataset,test_dataset)
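+            # Evaluate the candidate subpolicy on the child network; fit_val is its test accuracy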
+            fit_val = self._test_autoaugment_policy(sub_pol, child_network_architecture, train_dataset, test_dataset)
 
-            self.running_policy.append((full_policy, fit_val))
+
+            self.running_policy.append((sub_pol, fit_val))
 
             if len(self.running_policy) > self.sp_num:
                 self.running_policy = sorted(self.running_policy, key=lambda x: x[1], reverse=True)
                 self.running_policy = self.running_policy[:self.sp_num]
-                print("appended policy: ", self.running_policy)
 
 
             if len(self.history_best) == 0: