Commit 890b6c6c authored by Sun Jin Kim

add node_modules/ to .gitignore

parent c68dc525
@@ -123,6 +123,7 @@ venv/
 ENV/
 env.bak/
 venv.bak/
+node_modules/

 # Spyder project settings
 .spyderproject
@@ -158,4 +159,6 @@ cython_debug/

 # our project specific stuff
 MetaAugment/__pycache__/main.cpython-38.pyc
-**/mnist/
\ No newline at end of file
+**/mnist/
+**/.DS_Store
\ No newline at end of file
File deleted
@@ -18,7 +18,7 @@ AutoAugment paper:

 .. code-block::

-   my_policy = subpolicies1 = [
+   my_policy = [
       (("Invert", 0.8, None), ("Contrast", 0.2, 6)),
       (("Rotate", 0.7, 2), ("Invert", 0.8, None)),
       (("Sharpness", 0.8, 1), ("Sharpness", 0.9, 3)),
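Side note on the policy format shown above: a policy is a list of subpolicies, and each subpolicy is a pair of (operation name, probability, magnitude) triples. Below is a minimal, hypothetical sketch (not code from this repository) of how one such subpolicy could be applied to a PIL image using torchvision's functional transforms; the OPS table, the apply_subpolicy helper, and the magnitude-to-value scaling are illustrative assumptions.

# Hypothetical sketch only; not part of MetaAugment.
import random
import torchvision.transforms.functional as F

OPS = {
    # magnitude m is assumed to be an integer in [0, 9]; the scaling is illustrative
    "Invert":    lambda img, m: F.invert(img),
    "Contrast":  lambda img, m: F.adjust_contrast(img, 0.1 + 0.18 * m),
    "Rotate":    lambda img, m: F.rotate(img, 3.0 * m),
    "Sharpness": lambda img, m: F.adjust_sharpness(img, 0.1 + 0.18 * m),
}

def apply_subpolicy(img, subpolicy):
    # Each (name, probability, magnitude) operation fires independently with
    # its own probability; magnitude may be None for operations that take none.
    for name, prob, magnitude in subpolicy:
        if random.random() < prob:
            img = OPS[name](img, magnitude)
    return img

# e.g. augmented = apply_subpolicy(img, my_policy[1])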
# You can run this in the main directory by typing:
# python -m tutorials.how_use_aalearner
import MetaAugment.autoaugment_learners as aal
import MetaAugment.child_networks as cn
import torchvision.datasets as datasets
import torchvision

# Defining our problem setting:
# In other words, specifying the dataset and the child network
train_dataset = datasets.MNIST(root='./MetaAugment/datasets/mnist/train',
                               train=True, download=True, transform=None)
test_dataset = datasets.MNIST(root='./MetaAugment/datasets/mnist/test',
                              train=False, download=True,
                              transform=torchvision.transforms.ToTensor())
child_network = cn.lenet

# NOTE: It is important not to type:
# child_network = cn.lenet()
# We need the ``child_network`` variable to be a ``type``, not an ``nn.Module``
# instance, because ``child_network`` will be called multiple times to
# initialize a fresh ``nn.Module`` of its architecture: once every time we
# need to train a different network to evaluate a different policy.
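# For illustration only (this is NOT the library's internal code, just a
# hypothetical sketch of the idea): each policy evaluation starts from
# freshly initialized weights, roughly like
#
#     for policy in candidate_policies:       # ``candidate_policies`` is hypothetical
#         model = child_network()             # fresh ``nn.Module`` with new random weights
#         accuracy = _train_and_test(model)   # ``_train_and_test`` is hypothetical
#
# which only works if ``child_network`` is the class itself.
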
# Using the random search learner to evaluate randomly generated policies
rs_agent = aal.randomsearch_learner()
rs_agent.learn(train_dataset, test_dataset, child_network, toy_flag=True)

# Viewing the results
# ``.history`` is a list containing all the policies tested and the respective
# accuracies obtained when trained using them
print(rs_agent.history)
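# If, as described above, each entry of ``.history`` pairs a policy with its
# accuracy (an assumption about the exact layout), the best policy found so
# far could be recovered with something like:
#
#     best_policy, best_acc = max(rs_agent.history, key=lambda entry: entry[1])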