Simulation code on DARTS logs nominally working.

This commit is contained in:
Debadeepta Dey 2021-10-19 19:27:11 -07:00, committed by Gustavo Rosa
Parent 7ebad4ff40
Commit 41c2fe018f
2 changed files: 129 additions and 4 deletions

9
.vscode/launch.json (vendored)
View file

@ -930,6 +930,15 @@
"args": ["--nb301-logs-dir", "C:\\Users\\dedey\\dataroot\\nasbench301\\nasbench301_full_data\\nb_301_v13_lc_iclr_final\\rs",
"--out-dir", "F:\\archai_experiment_reports"]
},
{
"name": "Analysis Simulate FEAR on Nasbench301 Toy",
"type": "python",
"request": "launch",
"program": "${cwd}/scripts/reports/fear_analysis/simulate_fear_on_nb301.py",
"console": "integratedTerminal",
"args": ["--nb301-logs-dir", "C:\\Users\\dedey\\dataroot\\nasbench301\\nasbench301_full_data\\nb_301_v13_toy",
"--out-dir", "F:\\archai_experiment_reports"]
},
{
"name": "CurrentFile",
"type": "python",

View file

@ -2,13 +2,34 @@ from collections import defaultdict
import json
import argparse
import os
from typing import List
from typing import Dict, List
from tqdm import tqdm
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from scipy.stats import kendalltau, spearmanr
from scipy.stats import kendalltau, spearmanr, sem
import statistics
def plot_spearman_top_percents(results:Dict[str, list],
                               plotly_fig_handle,
                               legend_text:str,
                               marker_color:str):
    """Add one marker per top-percent bucket to a plotly subplot figure.

    For bucket ``i`` a single point is drawn in subplot row ``i+1`` at
    (average proxy time, spearman correlation) with a horizontal error bar
    of one standard error. The legend entry is shown only for the first
    bucket so the trace name appears once.

    Args:
        results: dict with parallel lists under 'top_percents', 'avg_times',
            'stderr_times' and 'spes' (as produced elsewhere in this script).
        plotly_fig_handle: a plotly figure created with make_subplots.
        legend_text: trace name shown in the legend.
        marker_color: plotly color for the markers.
    """
    for row_idx, _ in enumerate(results['top_percents']):
        mean_time = results['avg_times'][row_idx]
        err_bar = dict(type='data',
                       array=[results['stderr_times'][row_idx]],
                       visible=True,
                       thickness=1,
                       width=0)
        trace = go.Scatter(x=[mean_time],
                           error_x=err_bar,
                           y=[results['spes'][row_idx]],
                           mode='markers',
                           name=legend_text,
                           showlegend=(row_idx == 0),
                           marker_color=marker_color)
        plotly_fig_handle.add_trace(trace, row=row_idx + 1, col=1)
def find_train_thresh_epochs(train_acc:List[float], train_thresh:float)->int:
@ -17,6 +38,55 @@ def find_train_thresh_epochs(train_acc:List[float], train_thresh:float)->int:
return i + 1
<<<<<<< HEAD
=======
def top_buckets_spearmans(all_reg_evals:List[float],
                          all_proxy_evals:List[float],
                          all_proxy_times:List[float]) -> Dict[str, list]:
    """Spearman rank correlation between regular and proxy evaluations,
    restricted to the top 10%, 20%, ..., 100% of architectures ranked by
    regular evaluation, plus time statistics of each bucket.

    Args:
        all_reg_evals: ground-truth (regular) evaluation scores.
        all_proxy_evals: proxy evaluation scores, parallel to all_reg_evals.
        all_proxy_times: proxy evaluation durations, parallel to all_reg_evals.

    Returns:
        dict with parallel per-bucket lists under the keys 'top_percents',
        'spes', 'avg_times', 'std_times' and 'stderr_times'.
    """
    assert len(all_reg_evals) == len(all_proxy_evals)
    assert len(all_reg_evals) == len(all_proxy_times)

    reg_proxy = list(zip(all_reg_evals, all_proxy_evals, all_proxy_times))
    # sort in descending order of accuracy of regular evaluation
    reg_proxy.sort(key=lambda t: t[0], reverse=True)

    top_percents = []
    spe_top_percents = []
    top_percent_times_avg = []
    top_percent_times_std = []
    top_percent_times_stderr = []

    for top_percent in range(10, 101, 10):
        top_percents.append(top_percent)
        # BUGFIX: original called `ma.floor`, but no `ma` alias is imported
        # anywhere in this file (NameError at runtime). For the non-negative
        # value here, int() truncation is exactly floor.
        num_to_keep = int(len(reg_proxy) * top_percent * 0.01)
        bucket = reg_proxy[:num_to_keep]
        bucket_reg = [x[0] for x in bucket]
        bucket_proxy = [x[1] for x in bucket]
        bucket_times = np.array([x[2] for x in bucket])

        top_percent_times_avg.append(np.mean(bucket_times))
        top_percent_times_std.append(np.std(bucket_times))
        # standard error of the mean (scipy.stats.sem, ddof=1 by default)
        top_percent_times_stderr.append(sem(bucket_times))

        spe_proxy, _ = spearmanr(bucket_reg, bucket_proxy)
        spe_top_percents.append(spe_proxy)

    results = {
        'top_percents': top_percents,
        'spes': spe_top_percents,
        'avg_times': top_percent_times_avg,
        'std_times': top_percent_times_std,
        'stderr_times': top_percent_times_stderr
    }
    return results
>>>>>>> 17e92924 (Simulation code on DARTS logs nominally working.)
def main():
parser = argparse.ArgumentParser(description='Nasbench301 time to threshold vs. test accuracy')
parser.add_argument('--nb301-logs-dir', '-d', type=str, help='folder with nasbench301 architecture training logs')
@ -65,7 +135,6 @@ def main():
all_reg_train_time_per_epoch[epoch_num].append((epoch_num + 1) * per_epoch_time)
fear_train_acc_spe, _ = spearmanr(all_test_acc, all_fear_end_acc)
print(f'FEAR Spearman training accuracy: {fear_train_acc_spe}')
print(f'FEAR avg time: {statistics.mean(all_fear_time)}')
@ -81,7 +150,54 @@ def main():
for epoch_num, spe in spes_train_acc_vs_epoch.items():
avg_time = avg_time_train_acc_vs_epoch[epoch_num]
<<<<<<< HEAD
print(f'Epoch {epoch_num}, spearman {spe}, avg. time: {avg_time} seconds')
=======
# print(f'Epoch {epoch_num}, spearman {spe}, avg. time: {avg_time} seconds')
# FEAR rank correlations at top n percent of architectures
# -------------------------------------------------------------
fear_results = top_buckets_spearmans(all_reg_evals=all_test_acc,
all_proxy_evals=all_fear_end_acc,
all_proxy_times=all_fear_time)
# picking epoch 10 to plot for regular evaluation
reg_results = {}
for epoch_num in all_reg_train_acc.keys():
all_reg = all_reg_train_acc[epoch_num]
all_reg_times = all_reg_train_time_per_epoch[epoch_num]
reg_results[epoch_num] = top_buckets_spearmans(all_reg_evals=all_test_acc,
all_proxy_evals=all_reg,
all_proxy_times=all_reg_times)
# plot
num_plots = len(fear_results['top_percents'])
num_plots_per_row = num_plots
num_plots_per_col = 1
subplot_titles = [f'Top {x} %' for x in fear_results['top_percents']]
fig = make_subplots(rows=num_plots_per_row,
cols=num_plots_per_col,
subplot_titles=subplot_titles,
shared_yaxes=False)
plot_spearman_top_percents(fear_results, fig, 'FEAR', 'red')
for epoch_num, epoch_num_results in reg_results.items():
plot_spearman_top_percents(epoch_num_results, fig, f'Regular epochs {epoch_num}', 'blue')
fig.update_layout(title_text="Duration vs. Spearman Rank Correlation vs. Top %")
fig.show()
# Regular evaluation rank correlations at top n percent of architectures
# -----------------------------------------------------------------------
print('dummy')
>>>>>>> 17e92924 (Simulation code on DARTS logs nominally working.)