diff --git a/temp_util/parse_ds_cn_arch.py b/temp_util/parse_ds_cn_arch.py
index e783b7da25414778e52fa16d09eff75cf944593c..76d33c195ca53a6e05d9d4d144788cd2f691cfc9 100644
--- a/temp_util/parse_ds_cn_arch.py
+++ b/temp_util/parse_ds_cn_arch.py
@@ -1,28 +1,38 @@
 from ..MetaAugment.child_networks import *
 from ..MetaAugment.main import create_toy, train_child_network
 import torch
+import torchvision
 import torchvision.datasets as datasets
 import pickle
 
-def parse_ds_cn_arch(ds, ds_name, IsLeNet, transform):
-    # open data and apply these transformations
+def parse_ds_cn_arch(ds, ds_name, IsLeNet):
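+    """Load the requested dataset splits and the child network architecture.
+
+    The train split is loaded with transform=None, leaving the caller
+    (an aa_learner) free to attach its own augmentation pipeline; the
+    test split only needs torchvision.transforms.ToTensor().
+    """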
     if ds == "MNIST":
-        train_dataset = datasets.MNIST(root='./datasets/mnist/train', train=True, download=True, transform=transform)
-        test_dataset = datasets.MNIST(root='./datasets/mnist/test', train=False, download=True, transform=transform)
+        train_dataset = datasets.MNIST(root='./datasets/mnist/train', 
+                        train=True, download=True, transform=None)
+        test_dataset = datasets.MNIST(root='./datasets/mnist/test', train=False, 
+                        download=True, transform=torchvision.transforms.ToTensor())
     elif ds == "KMNIST":
-        train_dataset = datasets.KMNIST(root='./datasets/kmnist/train', train=True, download=True, transform=transform)
-        test_dataset = datasets.KMNIST(root='./datasets/kmnist/test', train=False, download=True, transform=transform)
+        train_dataset = datasets.KMNIST(root='./datasets/kmnist/train', 
+                        train=True, download=True, transform=None)
+        test_dataset = datasets.KMNIST(root='./datasets/kmnist/test', train=False, 
+                        download=True, transform=torchvision.transforms.ToTensor())
     elif ds == "FashionMNIST":
-        train_dataset = datasets.FashionMNIST(root='./datasets/fashionmnist/train', train=True, download=True, transform=transform)
-        test_dataset = datasets.FashionMNIST(root='./datasets/fashionmnist/test', train=False, download=True, transform=transform)
+        train_dataset = datasets.FashionMNIST(root='./datasets/fashionmnist/train', 
+                        train=True, download=True, transform=None)
+        test_dataset = datasets.FashionMNIST(root='./datasets/fashionmnist/test', train=False, 
+                        download=True, transform=torchvision.transforms.ToTensor())
     elif ds == "CIFAR10":
-        train_dataset = datasets.CIFAR10(root='./datasets/cifar10/train', train=True, download=True, transform=transform)
-        test_dataset = datasets.CIFAR10(root='./datasets/cifar10/test', train=False, download=True, transform=transform)
+        train_dataset = datasets.CIFAR10(root='./datasets/cifar10/train', 
+                        train=True, download=True, transform=None)
+        test_dataset = datasets.CIFAR10(root='./datasets/cifar10/test', train=False, 
+                        download=True, transform=torchvision.transforms.ToTensor())
     elif ds == "CIFAR100":
-        train_dataset = datasets.CIFAR100(root='./datasets/cifar100/train', train=True, download=True, transform=transform)
-        test_dataset = datasets.CIFAR100(root='./datasets/cifar100/test', train=False, download=True, transform=transform)
+        train_dataset = datasets.CIFAR100(root='./datasets/cifar100/train', 
+                        train=True, download=True, transform=None)
+        test_dataset = datasets.CIFAR100(root='./datasets/cifar100/test', train=False, 
+                        download=True, transform=torchvision.transforms.ToTensor())
     elif ds == 'Other':
-        dataset = datasets.ImageFolder('./datasets/upload_dataset/'+ ds_name, transform=transform)
+        dataset = datasets.ImageFolder('./datasets/upload_dataset/' + ds_name, transform=None)
         len_train = int(0.8*len(dataset))
         train_dataset, test_dataset = torch.utils.data.random_split(dataset, [len_train, len(dataset)-len_train])
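+        # NOTE: unlike the branches above, the test split here keeps
+        # transform=None, so the caller must apply ToTensor() itself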
 
@@ -43,12 +53,12 @@ def parse_ds_cn_arch(ds, ds_name, IsLeNet, transform):
 
         
     if IsLeNet == "LeNet":
-        model = LeNet(img_height, img_width, num_labels, img_channels)
+        child_architecture = LeNet(img_height, img_width, num_labels, img_channels)
     elif IsLeNet == "EasyNet":
-        model = EasyNet(img_height, img_width, num_labels, img_channels)
+        child_architecture = EasyNet(img_height, img_width, num_labels, img_channels)
     elif IsLeNet == 'SimpleNet':
-        model = SimpleNet(img_height, img_width, num_labels, img_channels)
+        child_architecture = SimpleNet(img_height, img_width, num_labels, img_channels)
     else:
-        model = pickle.load(open(f'datasets/childnetwork', "rb"))
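+        # fall back to a pickled child network (e.g. one uploaded through the webapp)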
+        with open('datasets/childnetwork', "rb") as f:
+            child_architecture = pickle.load(f)
 
-    return train_dataset, test_dataset, model 
\ No newline at end of file
+    return train_dataset, test_dataset, child_architecture
diff --git a/temp_util/wapp_util.py b/temp_util/wapp_util.py
index 5ea55018b78337fd71951d4d1d0ce3847ff50ad8..cde572f7f1e0ba1590fb5685b7212f4d0b3b173a 100644
--- a/temp_util/wapp_util.py
+++ b/temp_util/wapp_util.py
@@ -15,46 +15,62 @@ import MetaAugment.child_networks as cn
 from MetaAugment.main import create_toy
 
 import pickle
+from pprint import pprint
+from .parse_ds_cn_arch import parse_ds_cn_arch
 
 def parse_users_learner_spec(
+            # arguments consumed by parse_ds_cn_arch (dataset and child network spec)
+            ds, 
+            ds_name, 
+            IsLeNet, 
             # aalearner type
             auto_aug_learner, 
             # search space settings
-            ds, 
-            ds_name, 
             exclude_method, 
             num_funcs, 
             num_policies, 
             num_sub_policies, 
             # child network settings
             toy_size, 
-            IsLeNet, 
             batch_size, 
             early_stop_num, 
             iterations, 
             learning_rate, 
             max_epochs
             ):
     """
     The website receives user inputs on what they want the aa_learner
-    to be. We take those hyperparameters and return an aa_learner
-
+    to be. We take those hyperparameters, construct the corresponding
+    aa_learner, and run it on the requested dataset.
+
     """
+    train_dataset, test_dataset, child_archi = parse_ds_cn_arch(
+                                                    ds,
+                                                    ds_name,
+                                                    IsLeNet
+                                                    )
     if auto_aug_learner == 'UCB':
-        policies = aal.ucb_learner.generate_policies(num_policies, num_sub_policies)
-        q_values, best_q_values = aal.ucb_learner.run_UCB1(
-                                                policies,
-                                                batch_size, 
-                                                learning_rate, 
-                                                ds, 
-                                                toy_size, 
-                                                max_epochs, 
-                                                early_stop_num, 
-                                                iterations, 
-                                                IsLeNet, 
-                                                ds_name
-                                                )     
-        best_q_values = np.array(best_q_values)
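+        # p_bins and m_bins set how finely augmentation probability and
+        # magnitude are discretised (discrete_p_m=True keeps them discrete)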
+        learner = aal.ucb_learner(
+                        # parameters that define the search space
+                        sp_num=num_sub_policies,
+                        p_bins=11,
+                        m_bins=10,
+                        discrete_p_m=True,
+                        # hyperparameters for training the child network
+                        batch_size=batch_size,
+                        toy_size=toy_size,
+                        learning_rate=learning_rate,
+                        max_epochs=max_epochs,
+                        early_stop_num=early_stop_num,
+                        # ucb_learner specific hyperparameter
+                        num_policies=num_policies
+                        )
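+        # show the learner's starting policies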
+        pprint(learner.policies)
+
+        learner.learn(
+            train_dataset=train_dataset,
+            test_dataset=test_dataset,
+            child_network_architecture=child_archi,
+            iterations=iterations
+            )
     elif auto_aug_learner == 'Evolutionary Learner':
         network = cont_n.evo_controller(fun_num=num_funcs, p_bins=1, m_bins=1, sub_num_pol=1)
         child_network = cn.LeNet()
@@ -71,68 +87,28 @@ def parse_users_learner_spec(
                                 )
         learner.run_instance()
     elif auto_aug_learner == 'Random Searcher':
-            # As opposed to when ucb==True, `ds` and `IsLenet` are processed outside of the agent
-            # This system makes more sense for the user who is not using the webapp and is instead
-            # using the library within their code
-        download = True
-        if ds == "MNIST":
-            train_dataset = datasets.MNIST(root='./MetaAugment/datasets/mnist/train', train=True, download=download)
-            test_dataset = datasets.MNIST(root='./MetaAugment/datasets/mnist/test', train=False,
-                                                download=download, transform=torchvision.transforms.ToTensor())
-        elif ds == "KMNIST":
-            train_dataset = datasets.KMNIST(root='./MetaAugment/datasets/kmnist/train', train=True, download=download)
-            test_dataset = datasets.KMNIST(root='./MetaAugment/datasets/kmnist/test', train=False,
-                                                download=download, transform=torchvision.transforms.ToTensor())
-        elif ds == "FashionMNIST":
-            train_dataset = datasets.FashionMNIST(root='./MetaAugment/datasets/fashionmnist/train', train=True, download=download)
-            test_dataset = datasets.FashionMNIST(root='./MetaAugment/datasets/fashionmnist/test', train=False,
-                                                download=download, transform=torchvision.transforms.ToTensor())
-        elif ds == "CIFAR10":
-            train_dataset = datasets.CIFAR10(root='./MetaAugment/datasets/cifar10/train', train=True, download=download)
-            test_dataset = datasets.CIFAR10(root='./MetaAugment/datasets/cifar10/test', train=False,
-                                                download=download, transform=torchvision.transforms.ToTensor())
-        elif ds == "CIFAR100":
-            train_dataset = datasets.CIFAR100(root='./MetaAugment/datasets/cifar100/train', train=True, download=download)
-            test_dataset = datasets.CIFAR100(root='./MetaAugment/datasets/cifar100/test', train=False,
-                                                download=download, transform=torchvision.transforms.ToTensor())
-        elif ds == 'Other':
-            dataset = datasets.ImageFolder('./MetaAugment/datasets/'+ ds_name)
-            len_train = int(0.8*len(dataset))
-            train_dataset, test_dataset = torch.utils.data.random_split(dataset, [len_train, len(dataset)-len_train])
-
-            # check sizes of images
-        img_height = len(train_dataset[0][0][0])
-        img_width = len(train_dataset[0][0][0][0])
-        img_channels = len(train_dataset[0][0])
-            # check output labels
-        if ds == 'Other':
-            num_labels = len(dataset.class_to_idx)
-        elif ds == "CIFAR10" or ds == "CIFAR100":
-            num_labels = (max(train_dataset.targets) - min(train_dataset.targets) + 1)
-        else:
-            num_labels = (max(train_dataset.targets) - min(train_dataset.targets) + 1).item()
-            # create toy dataset from above uploaded data
-        train_loader, test_loader = create_toy(train_dataset, test_dataset, batch_size, toy_size)
-            # create model
-        if IsLeNet == "LeNet":
-            model = cn.LeNet(img_height, img_width, num_labels, img_channels)
-        elif IsLeNet == "EasyNet":
-            model = cn.EasyNet(img_height, img_width, num_labels, img_channels)
-        elif IsLeNet == 'SimpleNet':
-            model = cn.SimpleNet(img_height, img_width, num_labels, img_channels)
-        else:
-            model = pickle.load(open(f'datasets/childnetwork', "rb"))
-
-            # use an aa_learner. in this case, a rs learner
-        agent = aal.randomsearch_learner(batch_size=batch_size,
-                                            learning_rate=learning_rate,
-                                            toy_size=toy_size,
-                                            max_epochs=max_epochs,
-                                            early_stop_num=early_stop_num,
-                                            )
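+        # datasets and the child network come from parse_ds_cn_arch above;
+        # only the random-search agent is configured here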
+        agent = aal.randomsearch_learner(
+                                        sp_num=num_sub_policies,
+                                        batch_size=batch_size,
+                                        learning_rate=learning_rate,
+                                        toy_size=toy_size,
+                                        max_epochs=max_epochs,
+                                        early_stop_num=early_stop_num,
+                                        )
+        agent.learn(train_dataset,
+                    test_dataset,
+                    child_network_architecture=child_archi,
+                    iterations=iterations)
+    elif auto_aug_learner == 'GRU Learner':
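+        # GRU-based controller; configured with the same hyperparameters
+        # as the random searcher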
+        agent = aal.gru_learner(
+                                sp_num=num_sub_policies,
+                                batch_size=batch_size,
+                                learning_rate=learning_rate,
+                                toy_size=toy_size,
+                                max_epochs=max_epochs,
+                                early_stop_num=early_stop_num,
+                                )
         agent.learn(train_dataset,
-                        test_dataset,
-                        child_network_architecture=model,
-                        iterations=iterations)
-    elif auto_aug_learner == 'Genetic Learner':
-        pass
\ No newline at end of file
+                    test_dataset,
+                    child_network_architecture=child_archi,
+                    iterations=iterations)