set apex for nvidia, set workers to non-none

This commit is contained in:
Shital Shah 2020-05-02 05:58:24 -07:00
Родитель b86b9f5053
Коммит 1904a7ab65
2 изменённых файлов: 7 добавлений и 5 удалений

Просмотреть файл

@@ -81,7 +81,7 @@ nas:
cutout: 16 # cutout length, use cutout augmentation when > 0
load_train: True # load train split of dataset
train_batch: 96
train_workers: null # if null then 4
train_workers: 4
test_workers: '_copy: ../train_workers' # if null then 4
load_test: True # load test split of dataset
test_batch: 1024
@@ -189,10 +189,10 @@ nas:
cutout: 0 # cutout length, use cutout augmentation when > 0
load_train: True # load train split of dataset
train_batch: 64
train_workers: null # if null then gpu_count*4
train_workers: 4 # if null then gpu_count*4
test_workers: '_copy: ../train_workers' # if null then 4
load_test: False # load test split of dataset
test_batch: 1024
test_workers: null # if null then gpu_count*4
val_ratio: 0.5 #split portion for test set, 0 to 1
val_fold: 0 #Fold number to use (0 to 4)
cv_num: 5 # total number of folds available
@@ -252,10 +252,10 @@ autoaug:
epochs: 50
load_train: True # load train split of dataset
train_batch: 64
train_workers: null # if null then gpu_count*4
train_workers: 4 # if null then gpu_count*4
test_workers: '_copy: ../train_workers' # if null then 4
load_test: True # load test split of dataset
test_batch: 1024
test_workers: null # if null then gpu_count*4
val_ratio: 0.4 #split portion for test set, 0 to 1
val_fold: 0 #Fold number to use (0 to 4)
cv_num: 5 # total number of folds available

Просмотреть файл

@@ -60,6 +60,8 @@ nas:
_copy: '/dataset_eval'
trainer:
apex:
enabled: True # global switch to disable everything apex
distributed_enabled: True # enable/disable distributed mode
loss_scale: "128.0" # loss scaling mode for mixed prec, must be string representing float or "dynamic"
aux_weight: 0.0 # weight for loss from auxiliary towers in test time arch
drop_path_prob: 0.0 # probability that given edge will be dropped