-
Notifications
You must be signed in to change notification settings - Fork 31
/
score_architectures.py
51 lines (38 loc) · 1.7 KB
/
score_architectures.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import numpy as np
import tensorflow as tf
from encoder import ControllerManager, StateSpace  # project-local module; defines the NAS controller and search-space helpers
# TF 1.x API: eager execution must be enabled before any TF ops are created,
# so this call has to run immediately after import, ahead of controller setup.
tf.enable_eager_execution()
# --- Search and training hyperparameters ---
B = 5  # number of blocks in each cell
K = None  # number of children networks to train
INPUT_B = 3  # number of blocks in each cell during training
MAX_EPOCHS = 3  # maximum number of epochs to train
BATCHSIZE = 128  # batchsize
CHILD_MODEL_LR = 0.001  # learning rate for the child models.
REGULARIZATION = 0  # regularization strength
CONTROLLER_CELLS = 100  # number of cells in RNN controller
RNN_TRAINING_EPOCHS = 10  # number of epochs to train the controller
RESTORE_CONTROLLER = True  # restore controller to continue training

# Candidate operations searched over. The original code first assigned the
# full default set minus identity and 3x3 conv —
#   ['3x3 dconv', '5x5 dconv', '7x7 dconv', '1x7-7x1 conv',
#    '3x3 maxpool', '3x3 avgpool']
# — then immediately overwrote it; that dead assignment is removed here and
# only the active mini search space is kept.
operators = ['3x3 maxpool', '1x7-7x1 conv']  # mini search space
# Build the search space over cell architectures: B blocks per cell, with no
# lookback/lookforward inputs, restricted to the operator set chosen above.
state_space = StateSpace(
    B,
    input_lookback_depth=0,
    input_lookforward_depth=0,
    operators=operators,
)

# Show the full space being searched before any training starts.
state_space.print_state_space()

# Instantiate the ControllerManager, which builds the internal RNN policy
# network used to propose child architectures.
controller = ControllerManager(
    state_space,
    B=B,
    K=K,
    train_iterations=RNN_TRAINING_EPOCHS,
    reg_param=REGULARIZATION,
    controller_cells=CONTROLLER_CELLS,
    input_B=INPUT_B,
    restore_controller=RESTORE_CONTROLLER,
)
# Run one controller update per trial ("trails" in the original comment was a
# typo for "trials"). B trials are run, one per block added to the cell.
# NOTE(review): the original loop body computed
#     k = None if trial == 0 else K
# every iteration but never used `k` — presumably it was meant to be passed to
# a child-sampling call (TODO: confirm against encoder.ControllerManager).
# The dead assignment is removed here; behavior is unchanged.
for trial in range(B):
    controller.update_step()
    print()

print("Finished")