From bd840c0d8ce43cb87855bf6b7a42a342ace79620 Mon Sep 17 00:00:00 2001
From: Sun Jin Kim <sk2521@ic.ac.uk>
Date: Tue, 26 Apr 2022 22:40:39 +0100
Subject: [PATCH] privatise certain methods in aa_learner and its subclasses

---
 .../autoaugment_learners/aa_learner.py        | 16 +++++------
 .../autoaugment_learners/autoaugment.py       |  6 ++---
 .../autoaugment_learners/gru_learner.py       | 12 ++++-----
 .../randomsearch_learner.py                   | 16 +++++------
 .../autoaugment_learners/ucb_learner.py       |  8 +++---
 benchmark/scripts/util_04_22.py               |  2 +-
 .../autoaugment_learners/aa_learners.rst      |  4 ++-
 ...ugment.autoaugment_learners.aa_learner.rst |  4 ---
 ...gment.autoaugment_learners.evo_learner.rst | 27 +++++++++++++++++++
 ...gment.autoaugment_learners.gru_learner.rst |  4 ---
 ...oaugment_learners.randomsearch_learner.rst |  6 -----
 ...gment.autoaugment_learners.ucb_learner.rst | 24 +++++++++++++++++
 docs/source/usage/autoaugment_helperclass.rst |  2 +-
 test/MetaAugment/test_aa_learner.py           | 18 ++++++-------
 test/MetaAugment/test_gru_learner.py          |  6 ++---
 test/MetaAugment/test_randomsearch_learner.py |  6 ++---
 16 files changed, 100 insertions(+), 61 deletions(-)
 create mode 100644 docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.evo_learner.rst
 create mode 100644 docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.ucb_learner.rst

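Note for downstream code: generate_new_policy, test_autoaugment_policy, and
translate_operation_tensor now carry a leading underscore and are treated as
private. A minimal sketch of how a custom learner adapts, mirroring the
docstring in aa_learner.learn below (MyLearner is a hypothetical placeholder;
the method and argument names come from this patch):

    import MetaAugment.autoaugment_learners as aal

    # MyLearner is a hypothetical subclass, shown only to illustrate the rename
    class MyLearner(aal.aa_learner):
        def _generate_new_policy(self):
            # override the underscored hook rather than generate_new_policy
            ...

        def learn(self, train_dataset, test_dataset,
                  child_network_architecture, iterations=15):
            for _ in range(iterations):
                policy = self._generate_new_policy()
                # reward is the child network's accuracy under this policy,
                # obtained via the renamed private tester
                reward = self._test_autoaugment_policy(
                                policy,
                                child_network_architecture,
                                train_dataset,
                                test_dataset)
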
diff --git a/MetaAugment/autoaugment_learners/aa_learner.py b/MetaAugment/autoaugment_learners/aa_learner.py
index 756ae33e..48c05b95 100644
--- a/MetaAugment/autoaugment_learners/aa_learner.py
+++ b/MetaAugment/autoaugment_learners/aa_learner.py
@@ -101,7 +101,7 @@ class aa_learner:
         self.op_tensor_length = self.fun_num + p_bins + m_bins if discrete_p_m else self.fun_num + 2
 
 
-    def translate_operation_tensor(self, operation_tensor, return_log_prob=False, argmax=False):
+    def _translate_operation_tensor(self, operation_tensor, return_log_prob=False, argmax=False):
         """
         takes in a tensor representing an operation and returns an actual operation which
         is in the form of:
@@ -220,7 +220,7 @@ class aa_learner:
             return operation
         
 
-    def generate_new_policy(self):
+    def _generate_new_policy(self):
         """
         Generate a new policy which can be fed into an AutoAugment object 
         by calling:
@@ -243,7 +243,7 @@ class aa_learner:
                         by calling: AutoAugment.subpolicies = policy
         """
 
-        raise NotImplementedError('generate_new_policy not implemented in aa_learner')
+        raise NotImplementedError('_generate_new_policy not implemented in aa_learner')
 
 
     def learn(self, train_dataset, test_dataset, child_network_architecture, iterations=15):
@@ -290,10 +290,10 @@ class aa_learner:
                       different policies
             
             for _ in range(15):
-                policy = self.generate_new_policy()
+                policy = self._generate_new_policy()
 
                 pprint(policy)
-                reward = self.test_autoaugment_policy(policy,
+                reward = self._test_autoaugment_policy(policy,
                                         child_network_architecture,
                                         train_dataset,
                                         test_dataset)
@@ -302,7 +302,7 @@ class aa_learner:
         """
     
 
-    def test_autoaugment_policy(self,
+    def _test_autoaugment_policy(self,
                                 policy,
                                 child_network_architecture,
                                 train_dataset,
@@ -396,10 +396,10 @@ class aa_learner:
     #     # This is dummy code
     #     # test out `n` random policies
     #     for _ in range(n):
-    #         policy = self.generate_new_policy()
+    #         policy = self._generate_new_policy()
 
     #         pprint(policy)
-    #         reward, acc_list = self.test_autoaugment_policy(policy,
+    #         reward, acc_list = self._test_autoaugment_policy(policy,
     #                                             child_network_architecture,
     #                                             train_dataset,
     #                                             test_dataset,
diff --git a/MetaAugment/autoaugment_learners/autoaugment.py b/MetaAugment/autoaugment_learners/autoaugment.py
index 5a8ecbcf..6baa28cd 100644
--- a/MetaAugment/autoaugment_learners/autoaugment.py
+++ b/MetaAugment/autoaugment_learners/autoaugment.py
@@ -446,7 +446,7 @@ if __name__=='__main__':
 
 
 
-    def test_autoaugment_policy(subpolicies, train_dataset, test_dataset):
+    def _test_autoaugment_policy(subpolicies, train_dataset, test_dataset):
 
         aa_transform = AutoAugment()
         aa_transform.subpolicies = subpolicies
@@ -470,8 +470,8 @@ if __name__=='__main__':
         return best_acc, acc_log
 
 
-    _, acc_log1 = test_autoaugment_policy(subpolicies1, train_dataset, test_dataset)
-    _, acc_log2 = test_autoaugment_policy(subpolicies2, train_dataset, test_dataset)
+    _, acc_log1 = _test_autoaugment_policy(subpolicies1, train_dataset, test_dataset)
+    _, acc_log2 = _test_autoaugment_policy(subpolicies2, train_dataset, test_dataset)
 
     plt.plot(acc_log1, label='subpolicies1')
     plt.plot(acc_log2, label='subpolicies2')
diff --git a/MetaAugment/autoaugment_learners/gru_learner.py b/MetaAugment/autoaugment_learners/gru_learner.py
index 5c15a4a4..7ca8088c 100644
--- a/MetaAugment/autoaugment_learners/gru_learner.py
+++ b/MetaAugment/autoaugment_learners/gru_learner.py
@@ -85,7 +85,7 @@ class gru_learner(aa_learner):
         self.softmax = torch.nn.Softmax(dim=0)
 
 
-    def generate_new_policy(self):
+    def _generate_new_policy(self):
         """
         The GRU controller outputs a new policy.
 
@@ -101,7 +101,7 @@ class gru_learner(aa_learner):
         choice of function, prob, and mag separately, so that the
         resulting tensor's values sum up to 3.
 
-        Then we input each tensor into self.translate_operation_tensor
+        Then we input each tensor into self._translate_operation_tensor
         with parameter (return_log_prob=True), which outputs a tuple
         in the form of ('img_function_name', prob, mag) and a float
         representing the log probability that we chose the chosen 
@@ -150,8 +150,8 @@ class gru_learner(aa_learner):
             op2 = softmaxed_vectors[2*subpolicy_idx+1]
 
             # translate both vectors
-            op1, log_prob1 = self.translate_operation_tensor(op1, return_log_prob=True)
-            op2, log_prob2 = self.translate_operation_tensor(op2, return_log_prob=True)
+            op1, log_prob1 = self._translate_operation_tensor(op1, return_log_prob=True)
+            op2, log_prob2 = self._translate_operation_tensor(op2, return_log_prob=True)
             
             new_policy.append((op1,op2))
             log_prob += (log_prob1+log_prob2)
@@ -177,10 +177,10 @@ class gru_learner(aa_learner):
 
             for k in range(self.cont_mb_size):
                 # log_prob is $\sum_{t=1}^T log(P(a_t|a_{(t-1):1};\theta_c))$, used in PPO
-                policy, log_prob = self.generate_new_policy()
+                policy, log_prob = self._generate_new_policy()
 
                 pprint(policy)
-                reward = self.test_autoaugment_policy(policy,
+                reward = self._test_autoaugment_policy(policy,
                                                     child_network_architecture, 
                                                     train_dataset,
                                                     test_dataset)
diff --git a/MetaAugment/autoaugment_learners/randomsearch_learner.py b/MetaAugment/autoaugment_learners/randomsearch_learner.py
index 2c35fb80..25fbd112 100644
--- a/MetaAugment/autoaugment_learners/randomsearch_learner.py
+++ b/MetaAugment/autoaugment_learners/randomsearch_learner.py
@@ -46,7 +46,7 @@ class randomsearch_learner(aa_learner):
                     )
         
 
-    def generate_new_discrete_operation(self):
+    def _generate_new_discrete_operation(self):
         """
         generate a new random operation in the form of a tensor of dimension:
             (fun_num + 11 + 10)
@@ -74,7 +74,7 @@ class randomsearch_learner(aa_learner):
         return torch.cat([fun_t, prob_t, mag_t])
 
 
-    def generate_new_continuous_operation(self):
+    def _generate_new_continuous_operation(self):
         """
         Returns operation_tensor, which is a tensor representation of a random operation with
         dimension:
@@ -101,7 +101,7 @@ class randomsearch_learner(aa_learner):
         return fun_p_m
 
 
-    def generate_new_policy(self):
+    def _generate_new_policy(self):
         """
         Generates a new policy, with the elements chosen at random
         (uniform random distribution).
@@ -115,10 +115,10 @@ class randomsearch_learner(aa_learner):
             for i in range(2):
                 # if our agent uses discrete representations of probability and magnitude
                 if self.discrete_p_m:
-                    new_op = self.generate_new_discrete_operation()
+                    new_op = self._generate_new_discrete_operation()
                 else:
-                    new_op = self.generate_new_continuous_operation()
-                new_op = self.translate_operation_tensor(new_op)
+                    new_op = self._generate_new_continuous_operation()
+                new_op = self._translate_operation_tensor(new_op)
                 ops.append(new_op)
 
             new_subpolicy = tuple(ops)
@@ -135,10 +135,10 @@ class randomsearch_learner(aa_learner):
             iterations=15):
         # test out `iterations` number of random policies
         for _ in range(iterations):
-            policy = self.generate_new_policy()
+            policy = self._generate_new_policy()
 
             pprint(policy)
-            reward = self.test_autoaugment_policy(policy,
+            reward = self._test_autoaugment_policy(policy,
                                                 child_network_architecture,
                                                 train_dataset,
                                                 test_dataset)
diff --git a/MetaAugment/autoaugment_learners/ucb_learner.py b/MetaAugment/autoaugment_learners/ucb_learner.py
index fdf735be..6ed010fc 100644
--- a/MetaAugment/autoaugment_learners/ucb_learner.py
+++ b/MetaAugment/autoaugment_learners/ucb_learner.py
@@ -47,7 +47,7 @@ class ucb_learner(randomsearch_learner):
         # attributes used in the UCB1 algorithm
         self.num_policies = num_policies
 
-        self.policies = [self.generate_new_policy() for _ in range(num_policies)]
+        self.policies = [self._generate_new_policy() for _ in range(num_policies)]
 
         self.avg_accs = [None]*self.num_policies
         self.best_avg_accs = []
@@ -67,7 +67,7 @@ class ucb_learner(randomsearch_learner):
                     and add to our list of policies
         """
 
-        self.policies += [self.generate_new_policy() for _ in range(n)]
+        self.policies += [self._generate_new_policy() for _ in range(n)]
 
         # all the below need to be lengthened to store information for the 
         # new policies
@@ -96,7 +96,7 @@ class ucb_learner(randomsearch_learner):
                 # test that one
                 this_policy_idx = self.avg_accs.index(None)
                 this_policy = self.policies[this_policy_idx]
-                acc = self.test_autoaugment_policy(
+                acc = self._test_autoaugment_policy(
                                 this_policy,
                                 child_network_architecture,
                                 train_dataset,
@@ -111,7 +111,7 @@ class ucb_learner(randomsearch_learner):
                 # one with the best q_plus_cnt value
                 this_policy_idx = np.argmax(self.q_plus_cnt)
                 this_policy = self.policies[this_policy_idx]
-                acc = self.test_autoaugment_policy(
+                acc = self._test_autoaugment_policy(
                                 this_policy,
                                 child_network_architecture,
                                 train_dataset,
diff --git a/benchmark/scripts/util_04_22.py b/benchmark/scripts/util_04_22.py
index 62c0456a..8d7aa6b1 100644
--- a/benchmark/scripts/util_04_22.py
+++ b/benchmark/scripts/util_04_22.py
@@ -111,7 +111,7 @@ def rerun_best_policy(
         print(f'{_}/{repeat_num}')
         temp_agent = aal.aa_learner(**config)
         accs.append(
-                temp_agent.test_autoaugment_policy(megapol,
+                temp_agent._test_autoaugment_policy(megapol,
                                     child_network_architecture,
                                     train_dataset,
                                     test_dataset,
diff --git a/docs/source/MetaAugment_library/autoaugment_learners/aa_learners.rst b/docs/source/MetaAugment_library/autoaugment_learners/aa_learners.rst
index 4fc99146..db9d7b9f 100644
--- a/docs/source/MetaAugment_library/autoaugment_learners/aa_learners.rst
+++ b/docs/source/MetaAugment_library/autoaugment_learners/aa_learners.rst
@@ -6,5 +6,7 @@ AutoAugment learners
    :toctree: generated
 
    MetaAugment.autoaugment_learners.aa_learner
+   MetaAugment.autoaugment_learners.evo_learner
    MetaAugment.autoaugment_learners.gru_learner
-   MetaAugment.autoaugment_learners.randomsearch_learner
\ No newline at end of file
+   MetaAugment.autoaugment_learners.randomsearch_learner
+   MetaAugment.autoaugment_learners.ucb_learner
\ No newline at end of file
diff --git a/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.aa_learner.rst b/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.aa_learner.rst
index 010241cb..85be0d01 100644
--- a/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.aa_learner.rst
+++ b/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.aa_learner.rst
@@ -14,11 +14,7 @@
    .. autosummary::
    
       ~aa_learner.__init__
-      ~aa_learner.demo_plot
-      ~aa_learner.generate_new_policy
       ~aa_learner.learn
-      ~aa_learner.test_autoaugment_policy
-      ~aa_learner.translate_operation_tensor
    
    
 
diff --git a/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.evo_learner.rst b/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.evo_learner.rst
new file mode 100644
index 00000000..37f06b00
--- /dev/null
+++ b/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.evo_learner.rst
@@ -0,0 +1,27 @@
+MetaAugment.autoaugment\_learners.evo\_learner
+==============================================
+
+.. currentmodule:: MetaAugment.autoaugment_learners
+
+.. autoclass:: evo_learner
+
+   
+   .. automethod:: __init__
+
+   
+   .. rubric:: Methods
+
+   .. autosummary::
+   
+      ~evo_learner.__init__
+      ~evo_learner.get_full_policy
+      ~evo_learner.get_single_policy_cov
+      ~evo_learner.in_pol_dict
+      ~evo_learner.learn
+      ~evo_learner.set_up_instance
+   
+   
+
+   
+   
+   
\ No newline at end of file
diff --git a/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.gru_learner.rst b/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.gru_learner.rst
index f5cbe442..23eb306c 100644
--- a/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.gru_learner.rst
+++ b/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.gru_learner.rst
@@ -14,11 +14,7 @@
    .. autosummary::
    
       ~gru_learner.__init__
-      ~gru_learner.demo_plot
-      ~gru_learner.generate_new_policy
       ~gru_learner.learn
-      ~gru_learner.test_autoaugment_policy
-      ~gru_learner.translate_operation_tensor
    
    
 
diff --git a/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.randomsearch_learner.rst b/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.randomsearch_learner.rst
index 5bfb303d..72903e47 100644
--- a/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.randomsearch_learner.rst
+++ b/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.randomsearch_learner.rst
@@ -14,13 +14,7 @@
    .. autosummary::
    
       ~randomsearch_learner.__init__
-      ~randomsearch_learner.demo_plot
-      ~randomsearch_learner.generate_new_continuous_operation
-      ~randomsearch_learner.generate_new_discrete_operation
-      ~randomsearch_learner.generate_new_policy
       ~randomsearch_learner.learn
-      ~randomsearch_learner.test_autoaugment_policy
-      ~randomsearch_learner.translate_operation_tensor
    
    
 
diff --git a/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.ucb_learner.rst b/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.ucb_learner.rst
new file mode 100644
index 00000000..83f80f48
--- /dev/null
+++ b/docs/source/MetaAugment_library/autoaugment_learners/generated/MetaAugment.autoaugment_learners.ucb_learner.rst
@@ -0,0 +1,24 @@
+MetaAugment.autoaugment\_learners.ucb\_learner
+==============================================
+
+.. currentmodule:: MetaAugment.autoaugment_learners
+
+.. autoclass:: ucb_learner
+
+   
+   .. automethod:: __init__
+
+   
+   .. rubric:: Methods
+
+   .. autosummary::
+   
+      ~ucb_learner.__init__
+      ~ucb_learner.learn
+      ~ucb_learner.make_more_policies
+   
+   
+
+   
+   
+   
\ No newline at end of file
diff --git a/docs/source/usage/autoaugment_helperclass.rst b/docs/source/usage/autoaugment_helperclass.rst
index 75f080fb..cc361da4 100644
--- a/docs/source/usage/autoaugment_helperclass.rst
+++ b/docs/source/usage/autoaugment_helperclass.rst
@@ -11,7 +11,7 @@ we use as a helper class to help us apply AutoAugment policies to datasets.
 This is a tutorial (in the sense described in https://documentation.divio.com/structure/).
 
 For an example of how the material is used in our library, see the source code of
-:meth:`aa_learner.test_autoaugment_policy <MetaAugment.autoaugment_learners.aa_learner>`.
+:meth:`aa_learner._test_autoaugment_policy <MetaAugment.autoaugment_learners.aa_learner>`.
 
 Let's say we have a policy within the search space specified by the original 
 AutoAugment paper:
diff --git a/test/MetaAugment/test_aa_learner.py b/test/MetaAugment/test_aa_learner.py
index 8960746c..64517cd0 100644
--- a/test/MetaAugment/test_aa_learner.py
+++ b/test/MetaAugment/test_aa_learner.py
@@ -7,13 +7,13 @@ import torchvision.datasets as datasets
 import random
 
 
-def test_translate_operation_tensor():
+def test__translate_operation_tensor():
     """
-    See if aa_learner class's translate_operation_tensor works
+    See if aa_learner class's _translate_operation_tensor works
     by feeding many (valid) inputs into it.
 
     We make many tensors of size (fun_num+p_bins+m_bins,), softmax
-    them, and feed them through the translate_operation_tensor method
+    them, and feed them through the _translate_operation_tensor method
     to check that it doesn't break
     """
 
@@ -44,7 +44,7 @@ def test_translate_operation_tensor():
         mag_t = softmax(mag_t * alpha)
         softmaxed_vector = torch.cat((fun_t, prob_t, mag_t))
 
-        agent.translate_operation_tensor(softmaxed_vector)
+        agent._translate_operation_tensor(softmaxed_vector)
     
 
     # discrete_p_m=False
@@ -73,10 +73,10 @@ def test_translate_operation_tensor():
 
         softmaxed_vector = torch.cat((fun_t, prob_t, mag_t))
 
-        agent.translate_operation_tensor(softmaxed_vector)
+        agent._translate_operation_tensor(softmaxed_vector)
 
 
-def test_test_autoaugment_policy():
+def test__test_autoaugment_policy():
     agent = aal.aa_learner(
                 sp_num=5,
                 p_bins=11,
@@ -107,7 +107,7 @@ def test_test_autoaugment_policy():
                             train=False, download=True,
                             transform=torchvision.transforms.ToTensor())
 
-    acc = agent.test_autoaugment_policy(
+    acc = agent._test_autoaugment_policy(
                                         policy,
                                         child_network_architecture,
                                         train_dataset,
@@ -134,7 +134,7 @@ def test_exclude_method():
         exclude_method=exclude_method
     )
     for _ in range(200):
-        new_pol, _ = agent.generate_new_policy()
+        new_pol, _ = agent._generate_new_policy()
         print(new_pol)
         for (op1, op2) in new_pol:
             image_function_1 = op1[0]
@@ -146,7 +146,7 @@ def test_exclude_method():
         exclude_method=exclude_method
     )
     for _ in range(200):
-        new_pol, _ = agent.generate_new_policy()
+        new_pol = agent._generate_new_policy()
         print(new_pol)
         for (op1, op2) in new_pol:
             image_function_1 = op1[0]
diff --git a/test/MetaAugment/test_gru_learner.py b/test/MetaAugment/test_gru_learner.py
index cd52b0e9..b2ea8930 100644
--- a/test/MetaAugment/test_gru_learner.py
+++ b/test/MetaAugment/test_gru_learner.py
@@ -6,9 +6,9 @@ import torchvision.datasets as datasets
 
 import random
 
-def test_generate_new_policy():
+def test__generate_new_policy():
     """
-    make sure gru_learner.generate_new_policy() is robust
+    make sure gru_learner._generate_new_policy() is robust
     with respect to different values of sp_num, fun_num, 
     p_bins, and m_bins
     """
@@ -24,7 +24,7 @@ def test_generate_new_policy():
             cont_mb_size=2
             )
         for _ in range(4):
-            new_policy = agent.generate_new_policy()
+            new_policy = agent._generate_new_policy()
             assert isinstance(new_policy[0], list), new_policy
 
 
diff --git a/test/MetaAugment/test_randomsearch_learner.py b/test/MetaAugment/test_randomsearch_learner.py
index 61e9f9cd..6c5a9350 100644
--- a/test/MetaAugment/test_randomsearch_learner.py
+++ b/test/MetaAugment/test_randomsearch_learner.py
@@ -6,9 +6,9 @@ import torchvision.datasets as datasets
 
 import random
 
-def test_generate_new_policy():
+def test__generate_new_policy():
     """
-    make sure randomsearch_learner.generate_new_policy() is robust
+    make sure randomsearch_learner._generate_new_policy() is robust
     with respect to different values of sp_num, fun_num, 
     p_bins, and m_bins
     """
@@ -27,7 +27,7 @@ def test_generate_new_policy():
                 discrete_p_m=discrete_p_m
                 )
             for _ in range(4):
-                new_policy = agent.generate_new_policy()
+                new_policy = agent._generate_new_policy()
                 assert isinstance(new_policy, list), new_policy
     
     discrete_p_m = True
-- 
GitLab