diff --git a/MetaAugment/autoaugment_learners/__init__.py b/MetaAugment/autoaugment_learners/__init__.py
index 149af5b8165316ae1e00e6fc1c476338324b73cc..e844c5e0ae157061ceb54251b1fd3ecdbdea77c8 100644
--- a/MetaAugment/autoaugment_learners/__init__.py
+++ b/MetaAugment/autoaugment_learners/__init__.py
@@ -1,3 +1,4 @@
 from .aa_learner import *
 from .randomsearch_learner import *
 from .gru_learner import *
+from .evo_learner import *
\ No newline at end of file
diff --git a/MetaAugment/UCB1_JC_py.py b/MetaAugment/autoaugment_learners/ucb_learner.py
similarity index 99%
rename from MetaAugment/UCB1_JC_py.py
rename to MetaAugment/autoaugment_learners/ucb_learner.py
index 959d55230a8ad0bfa7c4973c8b0857d93c67213f..41b8977156e9148965b0ffa6c00fe4d0a4a2595d 100644
--- a/MetaAugment/UCB1_JC_py.py
+++ b/MetaAugment/autoaugment_learners/ucb_learner.py
@@ -20,8 +20,8 @@ from matplotlib import pyplot as plt
 from numpy import save, load
 from tqdm import trange
 
-from .child_networks import *
-from .main import create_toy, train_child_network
+from ..child_networks import *
+from ..main import create_toy, train_child_network
 
 
 # In[6]:
diff --git a/MetaAugment/controller_networks/__init__.py b/MetaAugment/controller_networks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6182b736f03515c352860937ce318568cb3d65f8
--- /dev/null
+++ b/MetaAugment/controller_networks/__init__.py
@@ -0,0 +1 @@
+from .evo_controller import evo_controller
\ No newline at end of file
diff --git a/auto_augmentation/training.py b/auto_augmentation/training.py
deleted file mode 100644
index 39001e9eb3bdbada40daef5cc443007e342206ac..0000000000000000000000000000000000000000
--- a/auto_augmentation/training.py
+++ /dev/null
@@ -1,72 +0,0 @@
-from flask import Blueprint, request, render_template, flash, send_file, current_app
-import subprocess
-import os
-import zipfile
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.optim as optim
-import torch.utils.data as data_utils
-import torchvision
-import torchvision.datasets as datasets
-
-from matplotlib import pyplot as plt
-from numpy import save, load
-from tqdm import trange
-torch.manual_seed(0)
-# import agents and its functions
-
-from MetaAugment import UCB1_JC_py as UCB1_JC
-from MetaAugment import Evo_learner as Evo
-
-
-
-bp = Blueprint("training", __name__)
-
-
-@bp.route("/start_training", methods=["GET", "POST"])
-def response():
-
-    # hyperparameters to change
-
-    # auto_aug_learner = session
-
-    auto_aug_learner = current_app.config.get('AAL')
-
-    num_policies = current_app.config.get('NP')
-    num_sub_policies = current_app.config.get('NSP')
-    batch_size = current_app.config.get('BS')
-    learning_rate = current_app.config.get('LR')
-    toy_size = current_app.config.get('TS')
-    max_epochs = current_app.config.get('ME')
-    early_stop_num = current_app.config.get('ESN')
-    iterations = current_app.config.get('IT')
-    IsLeNet = current_app.config.get('ISLENET')
-    ds_name = current_app.config.get('DSN')
-    num_funcs = current_app.config.get('NUMFUN')
-    ds = current_app.config.get('ds')
-    exclude_method = current_app.config.get('exc_meth')
-
-
-    if auto_aug_learner == 'UCB':
-        policies = UCB1_JC.generate_policies(num_policies, num_sub_policies)
-        q_values, best_q_values = UCB1_JC.run_UCB1(policies, batch_size, learning_rate, ds, toy_size, max_epochs, early_stop_num, iterations, IsLeNet, ds_name)
-        best_q_values = np.array(best_q_values)
-
-    elif auto_aug_learner == 'Evolutionary Learner':
-        network = Evo.Learner(fun_num=num_funcs, p_bins=1, m_bins=1, sub_num_pol=1)
-        child_network = Evo.LeNet()
-        learner = Evo.Evolutionary_learner(network=network, fun_num=num_funcs, p_bins=1, mag_bins=1, sub_num_pol=1, ds = ds, ds_name=ds_name, exclude_method=exclude_method, child_network=child_network)
-        learner.run_instance()
-    elif auto_aug_learner == 'Random Searcher':
-        pass
-    elif auto_aug_learner == 'Genetic Learner':
-        pass
-
-    return render_template("progress.html", auto_aug_learner=auto_aug_learner)
-
-
-
diff --git a/backend_react/react_app.py b/backend_react/react_app.py
index d61e884f224ca63d7cb7625917658adefa3f2808..21f5e8a2a9ae99d4b058931f2e912aa116d7ee0b 100644
--- a/backend_react/react_app.py
+++ b/backend_react/react_app.py
@@ -23,9 +23,9 @@ import os
 import sys
 sys.path.insert(0, os.path.abspath('..'))
 
-# import agents and its functions
-from ..MetaAugment import UCB1_JC_py as UCB1_JC
-from ..MetaAugment.autoaugment_learners import evo_learner
+# # import agents and its functions
+from MetaAugment.autoaugment_learners import ucb_learner as UCB1_JC
+from MetaAugment.autoaugment_learners import evo_learner
 import MetaAugment.controller_networks as cn
 import MetaAugment.autoaugment_learners as aal
 print('@@@ import successful')
@@ -43,8 +43,8 @@ app = Flask(__name__)
 def get_form_data():
     print('@@@ in Flask Home')
     # form_data = request.get_json()
-    form_data = request.files['ds_upload']
-    print('@@@ form_data', form_data)
+    # form_data = request.files['ds_upload']
+    # print('@@@ form_data', form_data)
 
     # form_data = request.form.get('test')
     # print('@@@ this is form data', request.get_data())
@@ -138,43 +138,59 @@ def get_form_data():
 @app.route('/confirm', methods=['POST', 'GET'])
 def confirm():
     print('inside confirm')
+
+    # aa learner
     auto_aug_learner = current_app.config.get('AAL')
+
+    # search space & problem setting
+    ds = current_app.config.get('ds')
+    ds_name = current_app.config.get('DSN')
+    exclude_method = current_app.config.get('exc_meth')
     num_policies = current_app.config.get('NP')
     num_sub_policies = current_app.config.get('NSP')
-    batch_size = current_app.config.get('BS')
-    learning_rate = current_app.config.get('LR')
+    num_funcs = current_app.config.get('NUMFUN')
     toy_size = current_app.config.get('TS')
-    max_epochs = current_app.config.get('ME')
+
+    # child network
+    IsLeNet = current_app.config.get('ISLENET')
+
+    # child network training hyperparameters
+    batch_size = current_app.config.get('BS')
     early_stop_num = current_app.config.get('ESN')
     iterations = current_app.config.get('IT')
-    IsLeNet = current_app.config.get('ISLENET')
-    ds_name = current_app.config.get('DSN')
-    num_funcs = current_app.config.get('NUMFUN')
-    ds = current_app.config.get('ds')
-    exclude_method = current_app.config.get('exc_meth')
+    learning_rate = current_app.config.get('LR')
+    max_epochs = current_app.config.get('ME')
 
     data = {'ds': ds, 'ds_name': ds_name, 'IsLeNet': IsLeNet, 'ds_folder.filename': ds_name, 'auto_aug_learner':auto_aug_learner, 'batch_size': batch_size, 'learning_rate': learning_rate, 'toy_size':toy_size, 'iterations':iterations, }
 
-    return {'data 1': 'show data'}
+    return {'batch_size': '12'}
 
 
 # ========================================================================
 @app.route('/training', methods=['POST', 'GET'])
 def training():
+
+    # aa learner
     auto_aug_learner = current_app.config.get('AAL')
+
+    # search space & problem setting
+    ds = current_app.config.get('ds')
+    ds_name = current_app.config.get('DSN')
+    exclude_method = current_app.config.get('exc_meth')
+    num_funcs = current_app.config.get('NUMFUN')
     num_policies = current_app.config.get('NP')
     num_sub_policies = current_app.config.get('NSP')
-    batch_size = current_app.config.get('BS')
-    learning_rate = current_app.config.get('LR')
     toy_size = current_app.config.get('TS')
-    max_epochs = current_app.config.get('ME')
+
+    # child network
+    IsLeNet = current_app.config.get('ISLENET')
+
+    # child network training hyperparameters
+    batch_size = current_app.config.get('BS')
     early_stop_num = current_app.config.get('ESN')
     iterations = current_app.config.get('IT')
-    IsLeNet = current_app.config.get('ISLENET')
-    ds_name = current_app.config.get('DSN')
-    num_funcs = current_app.config.get('NUMFUN')
-    ds = current_app.config.get('ds')
-    exclude_method = current_app.config.get('exc_meth')
+    learning_rate = current_app.config.get('LR')
+    max_epochs = current_app.config.get('ME')
 
 
     if auto_aug_learner == 'UCB':
diff --git a/flask_mvp/auto_augmentation/progress.py b/flask_mvp/auto_augmentation/progress.py
index abe15fe35fb226fe94c30d513e5634b22a72c30a..4c3e96b28ca42e47eada9913c5008bafb90f5ddb 100644
--- a/flask_mvp/auto_augmentation/progress.py
+++ b/flask_mvp/auto_augmentation/progress.py
@@ -18,8 +18,8 @@ from tqdm import trange
 torch.manual_seed(0)
 # import agents and its functions
-from MetaAugment import UCB1_JC_py as UCB1_JC
-
+from MetaAugment.autoaugment_learners import ucb_learner
+# hi
 from MetaAugment import Evo_learner as Evo
 
 import MetaAugment.autoaugment_learners as aal
@@ -38,21 +38,28 @@ def response():
 
     if request.method == 'POST':
 
+        # generate random policies at start
+        auto_aug_learner = request.form.get("auto_aug_selection")
+
+        # search space & problem setting
+        ds = request.form.get("dataset_selection")  # pick dataset (MNIST, KMNIST, FashionMNIST, CIFAR10, CIFAR100)
+        ds_up = request.files['dataset_upload']
         exclude_method = request.form.getlist("action_space")
         num_funcs = 14 - len(exclude_method)
+        num_policies = 5  # fix number of policies
+        num_sub_policies = 5  # fix number of sub-policies in a policy
+        toy_size = 1  # total propeortion of training and test set we use
 
-        batch_size = 1  # size of batch the inner NN is trained with
-        learning_rate = 1e-1  # fix learning rate
-        ds = request.form.get("dataset_selection")  # pick dataset (MNIST, KMNIST, FashionMNIST, CIFAR10, CIFAR100)
-        ds_up = request.files['dataset_upload']
+        # child network
+        IsLeNet = request.form.get("network_selection")  # using LeNet or EasyNet or SimpleNet ->> default
         nw_up = childnetwork = request.files['network_upload']
-        toy_size = 1  # total propeortion of training and test set we use
-        max_epochs = 10  # max number of epochs that is run if early stopping is not hit
+
+        # child network training hyperparameters
+        batch_size = 1  # size of batch the inner NN is trained with
         early_stop_num = 10  # max number of worse validation scores before early stopping is triggered
-        num_policies = 5  # fix number of policies
-        num_sub_policies = 5  # fix number of sub-policies in a policy
         iterations = 5  # total iterations, should be more than the number of policies
-        IsLeNet = request.form.get("network_selection")  # using LeNet or EasyNet or SimpleNet ->> default
+        learning_rate = 1e-1  # fix learning rate
+        max_epochs = 10  # max number of epochs that is run if early stopping is not hit
 
 
         # if user upload datasets and networks, save them in the database
@@ -83,16 +90,15 @@ def response():
             childnetwork = request.files['network_upload']
             childnetwork.save('./MetaAugment/child_networks/'+childnetwork.filename)
 
 
-        # generate random policies at start
-        auto_aug_leanrer = request.form.get("auto_aug_selection")
-        if auto_aug_leanrer == 'UCB':
-            policies = UCB1_JC.generate_policies(num_policies, num_sub_policies)
-            q_values, best_q_values = UCB1_JC.run_UCB1(policies, batch_size, learning_rate, ds, toy_size, max_epochs, early_stop_num, iterations, IsLeNet, ds_name)
-        elif auto_aug_leanrer == 'Evolutionary Learner':
+
+        if auto_aug_learner == 'UCB':
+            policies = ucb_learner.generate_policies(num_policies, num_sub_policies)
+            q_values, best_q_values = ucb_learner.run_UCB1(policies, batch_size, learning_rate, ds, toy_size, max_epochs, early_stop_num, iterations, IsLeNet, ds_name)
+        elif auto_aug_learner == 'Evolutionary Learner':
             learner = Evo.Evolutionary_learner(fun_num=num_funcs, p_bins=1, mag_bins=1, sub_num_pol=1, ds_name=ds_name, exclude_method=exclude_method)
             learner.run_instance()
-        elif auto_aug_leanrer == 'Random Searcher':
+        elif auto_aug_learner == 'Random Searcher':
            # As opposed to when ucb==True, `ds` and `IsLenet` are processed outside of the agent
            # This system makes more sense for the user who is not using the webapp and is instead
            # using the library within their code
@@ -157,7 +163,7 @@ def response():
                                                 test_dataset,
                                                 child_network_architecture=model,
                                                 iterations=iterations)
-        elif auto_aug_leanrer == 'Genetic Learner':
+        elif auto_aug_learner == 'Genetic Learner':
             pass
 
         plt.figure()
@@ -165,8 +171,8 @@ def response():
 
 
    #     if auto_aug_learner == 'UCB':
-   #         policies = UCB1_JC.generate_policies(num_policies, num_sub_policies)
-   #         q_values, best_q_values = UCB1_JC.run_UCB1(policies, batch_size, learning_rate, ds, toy_size, max_epochs, early_stop_num, iterations, IsLeNet, ds_name)
+   #         policies = ucb_learner.generate_policies(num_policies, num_sub_policies)
+   #         q_values, best_q_values = ucb_learner.run_UCB1(policies, batch_size, learning_rate, ds, toy_size, max_epochs, early_stop_num, iterations, IsLeNet, ds_name)
    #         # plt.figure()
    #         # plt.plot(q_values)
    #         best_q_values = np.array(best_q_values)
diff --git a/auto_augmentation/templates/intro.html b/flask_mvp/auto_augmentation/templates/intro.html
similarity index 100%
rename from auto_augmentation/templates/intro.html
rename to flask_mvp/auto_augmentation/templates/intro.html
diff --git a/auto_augmentation/templates/training.html b/flask_mvp/auto_augmentation/templates/training.html
similarity index 100%
rename from auto_augmentation/templates/training.html
rename to flask_mvp/auto_augmentation/templates/training.html
diff --git a/flask_mvp/auto_augmentation/training.py b/flask_mvp/auto_augmentation/training.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e695b58a2994efb1bdc89bb363b3eddf643d9dc
--- /dev/null
+++ b/flask_mvp/auto_augmentation/training.py
@@ -0,0 +1,99 @@
+from flask import Blueprint, request, render_template, flash, send_file, current_app
+import subprocess
+import os
+import zipfile
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+import torch.utils.data as data_utils
+import torchvision
+import torchvision.datasets as datasets
+
+from matplotlib import pyplot as plt
+from numpy import save, load
+from tqdm import trange
+torch.manual_seed(0)
+# import agents and its functions
+
+import MetaAugment.autoaugment_learners as aal
+import MetaAugment.controller_networks as cont_n
+import MetaAugment.child_networks as cn
+
+
+
+bp = Blueprint("training", __name__)
+
+
+@bp.route("/start_training", methods=["GET", "POST"])
+def response():
+
+
+
+    # aa learner
+    auto_aug_learner = current_app.config.get('AAL')
+    # auto_aug_learner = session
+
+    # search space & problem setting
+    ds = current_app.config.get('ds')
+    ds_name = current_app.config.get('DSN')
+    exclude_method = current_app.config.get('exc_meth')
+    num_funcs = current_app.config.get('NUMFUN')
+    num_policies = current_app.config.get('NP')
+    num_sub_policies = current_app.config.get('NSP')
+    toy_size = current_app.config.get('TS')
+
+    # child network
+    IsLeNet = current_app.config.get('ISLENET')
+
+    # child network training hyperparameters
+    batch_size = current_app.config.get('BS')
+    early_stop_num = current_app.config.get('ESN')
+    iterations = current_app.config.get('IT')
+    learning_rate = current_app.config.get('LR')
+    max_epochs = current_app.config.get('ME')
+
+
+    if auto_aug_learner == 'UCB':
+        policies = aal.ucb_learner.generate_policies(num_policies, num_sub_policies)
+        q_values, best_q_values = aal.ucb_learner.run_UCB1(
+            policies,
+            batch_size,
+            learning_rate,
+            ds,
+            toy_size,
+            max_epochs,
+            early_stop_num,
+            iterations,
+            IsLeNet,
+            ds_name
+            )
+        best_q_values = np.array(best_q_values)
+
+    elif auto_aug_learner == 'Evolutionary Learner':
+        network = cont_n.evo_controller(fun_num=num_funcs, p_bins=1, m_bins=1, sub_num_pol=1)
+        child_network = cn.LeNet()
+        learner = aal.evo_learner(
+            network=network,
+            fun_num=num_funcs,
+            p_bins=1,
+            mag_bins=1,
+            sub_num_pol=1,
+            ds = ds,
+            ds_name=ds_name,
+            exclude_method=exclude_method,
+            child_network=child_network
+            )
+        learner.run_instance()
+    elif auto_aug_learner == 'Random Searcher':
+        pass
+    elif auto_aug_learner == 'Genetic Learner':
+        pass
+
+    return render_template("progress.html", auto_aug_learner=auto_aug_learner)
+
+
+
diff --git a/src/pages/Confirm.js b/src/pages/Confirm.js
index 779b6b2ef162233ae597ed58335a5da9c4334f05..e293133a61438cde948b463ad8a4d3e581873b27 100644
--- a/src/pages/Confirm.js
+++ b/src/pages/Confirm.js
@@ -4,14 +4,19 @@ import CheckCircleOutlineRoundedIcon from '@mui/icons-material/CheckCircleOutlin
 import TuneRoundedIcon from '@mui/icons-material/TuneRounded';
 
 export default function Confirm() {
+  const [batchSize, setBatchSize] = useState(0)
 // // const [myData, setMyData] = useState([{}])
-// useEffect(async () => {
-//   await fetch('/confirm').then(
-//     response => {console.log('response', response, 'response.json()', response.json()); response.json()}
-//   )
-//   // .then(data => {console.log('training', data);
-//   //   })
-// }, []);
+  useEffect(() => {
+    const res = fetch('/confirm').then(
+      response => response.json()
+      ).then(data => setBatchSize(data.batch_size));
+
+    console.log("batchsize", batchSize)
+    // setBatchSize(res.batch_size)
+
+    // .then(data => {console.log('training', data);
+    //   })
+  }, []);
 
 
@@ -34,7 +39,7 @@ export default function Confirm() {
             <ListItemAvatar>
               <TuneRoundedIcon color="primary" fontSize='large'/>
             </ListItemAvatar>
-            <ListItemText primary="Batch size" secondary="[Batch size]" />
+            <ListItemText primary="Batch size" secondary={batchSize} />
           </ListItem>
         </Grid>
         <Grid xs={12} sm={6} item >
diff --git a/src/pages/Home.js b/src/pages/Home.js
index 63183d191cb0a4cd8c098f9ab2c289d9d58ede88..a3057f585732b495bbd13972e39d25157eccbe67 100644
--- a/src/pages/Home.js
+++ b/src/pages/Home.js
@@ -58,6 +58,7 @@ export default function Home() {
       body: formData
       }).then((response) => response.json());
     console.log('check if it is here')
+    navigate('/confirm', {replace:true});
 
     // ///////// testing