support other profiling metrics such as energy
Parent: 402a213492
Commit: 849ace5694
@@ -85,7 +85,7 @@ class Latency:
         return self + rhs.__neg__()
 
 
-def dump_profiled_results(results, detail = False):
+def dump_profiled_results(results, detail = False, metrics = ["latency"]):
     ''' convert Latency instance to string and return profiled results
 
     @params
@@ -105,8 +105,12 @@ def dump_profiled_results(results, detail = False):
                 else:
                     dumped_results[module_key][model_key][info_key] = info
             else:
-                if 'latency' in model:
-                    dumped_results[module_key][model_key]['latency'] = str(model['latency'])
+                for info_key, info in model.items():
+                    if info_key in metrics:
+                        if info_key == 'latency':
+                            dumped_results[module_key][model_key]['latency'] = str(model['latency'])
+                        else:
+                            dumped_results[module_key][model_key][info_key] = model[info_key]
     return dumped_results
 
 
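For reference, a minimal sketch of how the extended `dump_profiled_results` is meant to be called once a backend reports an extra metric. The result dict, the `energy` value, and the `Latency` constructor arguments are illustrative, not taken from this commit:

```python
# Illustrative only: a profiled-results dict with a hypothetical energy metric.
from nn_meter.builder.backend_meta.utils import Latency, dump_profiled_results

results = {
    "conv_bn_relu": {                       # module key
        "id_0": {                           # model key
            "latency": Latency(1.2, 0.1),   # assumed (avg, std) constructor
            "energy": 3.5,                  # hypothetical metric value
        }
    }
}

# The default keeps only latency (serialized via str); listing both metrics
# also carries the raw energy value into the dumped dict.
dumped = dump_profiled_results(results, detail=False, metrics=["latency", "energy"])
```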
@@ -10,7 +10,7 @@ from .extract_features import get_feature_parser, get_data_by_profiled_results
 logging = logging.getLogger("nn-Meter")
 
 
-def build_predictor_by_data(kernel_type, kernel_data, backend = None, error_threshold = 0.1, mark = '', save_path = None):
+def build_predictor_by_data(kernel_type, kernel_data, backend = None, error_threshold = 0.1, mark = '', save_path = None, predict_label = "latency"):
     """
     build regression model by sampled data and latency, locate data with large-errors. Returns (current predictor, 10% Accuracy, error_cfgs),
     where error_cfgs represent configuration list, where each item is a configuration for one large-error-data.
@@ -28,9 +28,11 @@ def build_predictor_by_data(kernel_type, kernel_data, backend = None, error_thre
     mark (str): the mark for the running results. Defaults to ''.
 
     save_path (str): the folder to save results file such as feature table and predictor pkl file
+
+    predict_label (str): the predicting label to build kernel predictor
     """
     feature_parser = get_feature_parser(kernel_type)
-    data = get_data_by_profiled_results(kernel_type, feature_parser, kernel_data, save_path=os.path.join(save_path, f'Data_{kernel_type}_{mark}.csv'))
+    data = get_data_by_profiled_results(kernel_type, feature_parser, kernel_data, save_path=os.path.join(save_path, f'Data_{kernel_type}_{mark}.csv'), predict_label=predict_label)
 
     # get data for regression
     X, Y = data
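A hedged usage sketch of the new `predict_label` keyword on `build_predictor_by_data`; the kernel data tuple, file names, and backend name are placeholders, and `"energy"` assumes that key exists in the profiled results:

```python
# Sketch: train a kernel predictor on an arbitrary label instead of latency.
from nn_meter.builder.kernel_predictor_builder import build_predictor_by_data

predictor, acc10, error_cfgs = build_predictor_by_data(
    "conv_bn_relu",
    kernel_data=("configs.json", "profiled_results.json"),  # placeholder paths
    backend="tflite_cpu",                                   # placeholder backend
    error_threshold=0.1,
    mark="prior",
    save_path="results",
    predict_label="energy",  # must match a key in the profiled results
)
```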
@@ -102,7 +102,7 @@ def get_feature_parser(kernel_type):
         return BaseFeatureParser(kernel_type)
 
 
-def get_data_by_profiled_results(kernel_type, feature_parser, cfgs_path, lats_path = None, save_path = None):
+def get_data_by_profiled_results(kernel_type, feature_parser, cfgs_path, labs_path = None, save_path = None, predict_label = "latency"):
     ''' return (features, latency)
     kernel_type (str): type of kernel
 
@@ -125,7 +125,7 @@ def get_data_by_profiled_results(kernel_type, feature_parser, cfgs_path, lats_pa
             }
         }
     }
-    lats_path: pathe of profiled latency information dict, or dict of "profiled_results", such as
+    labs_path: path of profiled label information dict, or dict of "profiled_results", such as
     {
         "conv_bn_relu": {
             "id_0": {
@@ -133,37 +133,42 @@ def get_data_by_profiled_results(kernel_type, feature_parser, cfgs_path, lats_pa
             }
         }
     }
-    if lats_path == None, it means latency information are also included in cfgs_path.
+    if labs_path == None, it means latency (or other label) information are also included in cfgs_path.
 
     save_path: the path to save the feature and latency information
+
+    predict_label (str): the predicting label to build kernel predictor
     '''
-    if lats_path == None:
+    if labs_path == None:
         if type(cfgs_path) == tuple:
-            cfgs_path, lats_path = cfgs_path
+            cfgs_path, labs_path = cfgs_path
         else:
-            lats_path = cfgs_path
+            labs_path = cfgs_path
     if isinstance(cfgs_path, str):
         with open(cfgs_path, 'r') as fp:
             cfgs_dict = json.load(fp)[kernel_type]
     else:
         cfgs_dict = cfgs_path[kernel_type] if kernel_type in cfgs_path else cfgs_path
-    if isinstance(lats_path, str):
-        with open(lats_path, 'r') as fp:
-            lats_dict = read_profiled_results(json.load(fp))[kernel_type]
+    if isinstance(labs_path, str):
+        with open(labs_path, 'r') as fp:
+            labs_dict = read_profiled_results(json.load(fp))[kernel_type]
     else:
-        lats_dict = lats_path[kernel_type] if kernel_type in lats_path else lats_path
+        labs_dict = labs_path[kernel_type] if kernel_type in labs_path else labs_path
 
-    paths, features, lats = [], [], []
-    for id in lats_dict.keys():
+    paths, features, labs = [], [], []
+    for id in labs_dict.keys():
         try:
             path = cfgs_dict[id]["model"]
             configs = cfgs_dict[id]["config"]
             feature = feature_parser.get_feature_by_config(configs)
-            latency = lats_dict[id]["latency"].avg
-            if latency != 0.0:
+            if predict_label == "latency":
+                label = labs_dict[id]["latency"].avg
+            else:
+                label = labs_dict[id][predict_label]
+            if label != 0.0:
                 paths.append(os.path.basename(path))
                 features.append(feature)
-                lats.append(latency)
+                labs.append(label)
         except:
            pass
 
@@ -171,12 +176,12 @@ def get_data_by_profiled_results(kernel_type, feature_parser, cfgs_path, lats_pa
     if save_path:
         import pandas as pd
         cols = feature_parser.needed_config[:]
-        if len(features[0]) - len(feature_parser.needed_config) > 0:
+        if len(features[0]) - len(feature_parser.needed_config) > 0: # there are extra features beyond needed config
             cols += [f'feature_{i}' for i in range(len(features[0]) - len(feature_parser.needed_config))]
         data_df = pd.DataFrame(features, columns=cols)
         data_df = pd.concat([pd.DataFrame(paths, columns=["model_path"]), data_df], axis=1)
-        data_df["latency_ms"] = lats
+        data_df[predict_label] = labs
         data_df.to_csv(save_path, index=False)
         logging.info(f'Saved the feature table of all data for {kernel_type} in path {save_path}.')
 
-    return (features, lats)
+    return (features, labs)
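To make the `lats`-to-`labs` renaming concrete, a small sketch of extracting features and labels for a non-latency key; both functions are the ones patched above, and the file paths are placeholders:

```python
# Sketch: feature/label extraction with predict_label="energy".
feature_parser = get_feature_parser("conv_bn_relu")
features, labs = get_data_by_profiled_results(
    "conv_bn_relu",
    feature_parser,
    cfgs_path="configs.json",           # placeholder path to kernel configs
    labs_path="profiled_results.json",  # placeholder path to profiled labels
    save_path="Data_conv_bn_relu_prior.csv",
    predict_label="energy",
)
# The saved feature table now names its label column after predict_label
# rather than the previously hard-coded "latency_ms".
```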
@@ -84,6 +84,8 @@ def profile_models(backend, models, mode = 'ruletest', metrics = ["latency"], sa
         have_converted (boolean): if the model have been converted to the needed format by backend, the model will not be converted
             before profiling. The model path of `model['converted_model']` will be profiled on device directly. The conversion of
             model could be done by appling `nn_meter.builder.convert_models`
+
+        **kwargs: arguments for profiler
     """
     if isinstance(models, str):
         with open(models, 'r') as fp:
@@ -107,35 +109,36 @@ def profile_models(backend, models, mode = 'ruletest', metrics = ["latency"], sa
                     model_path = model['converted_model']
                     profiled_res = backend.profile(model_path, metrics, model['shapes'], **kwargs)
                     for metric in metrics:
-                        model[metric] = profiled_res[metric]
-                    time.sleep(2)
+                        model[metric] = profiled_res.data[metric]
+                    time.sleep(0.2)
                     count += 1
-                except:
-                    open(os.path.join(info_save_path, "profile_error.log"), 'a').write(id + "\n")
+                except Exception as e:
+                    open(os.path.join(info_save_path, "profile_error.log"), 'a').write(f"{id}: {e}\n")
             else: # the models have not been converted
                 try:
                     model_path = model['model']
                     profiled_res = backend.profile_model_file(model_path, model_save_path, model['shapes'], metrics, **kwargs)
                     for metric in metrics:
-                        model[metric] = profiled_res[metric]
+                        model[metric] = profiled_res.data[metric]
+                    time.sleep(0.2)
                     count += 1
-                except:
-                    open(os.path.join(info_save_path, "profile_error.log"), 'a').write(id + "\n")
+                except Exception as e:
+                    open(os.path.join(info_save_path, "profile_error.log"), 'a').write(f"{id}: {e}\n")
 
             # save information to json file for per 50 models
             if count > 0 and count % 50 == 0:
-                save_profiled_results(models, os.path.join(info_save_path, save_name), detail)
+                save_profiled_results(models, os.path.join(info_save_path, save_name), detail, metrics)
                 logging.keyinfo(f"{count} model complete. Still profiling... Save the intermediate results to {os.path.join(info_save_path, save_name)}.")
 
     # save information to json file
-    save_profiled_results(models, os.path.join(info_save_path, save_name), detail)
+    save_profiled_results(models, os.path.join(info_save_path, save_name), detail, metrics)
     logging.keyinfo(f"All {count} models complete. Save all success profiled results to {os.path.join(info_save_path, save_name)}.")
 
     return models
 
 
-def sample_and_profile_kernel_data(kernel_type, sample_num, backend, sampling_mode = 'prior', configs = None, mark = '', detail = True):
+def sample_and_profile_kernel_data(kernel_type, sample_num, backend, sampling_mode = 'prior', configs = None, mark = '', detail = True,
+                                   metrics = ["latency"], **kwargs):
     ''' sample kernel configs and profile kernel model based on configs
     '''
     from nn_meter.builder.kernel_predictor_builder import generate_config_sample
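A sketch of how a caller would request extra metrics from `profile_models`; the import paths are assumed from the package layout, the backend name and model file are placeholders, and whether `"energy"` is actually reported depends on the backend's profiler:

```python
# Sketch: profile models for latency plus a hypothetical energy metric.
from nn_meter.builder import profile_models
from nn_meter.builder.backends import connect_backend

backend = connect_backend(backend_name="tflite_cpu")  # placeholder backend
models = profile_models(
    backend,
    models="origin_kernels.json",    # placeholder path to generated models
    mode="predbuild",
    metrics=["latency", "energy"],   # each metric is read from profiled_res.data
    save_name="profiled_conv_bn_relu.json",
)
```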
@@ -146,11 +149,11 @@ def sample_and_profile_kernel_data(kernel_type, sample_num, backend, sampling_mo
 
     # connect to backend, run models and get latency
     backend = connect_backend(backend_name=backend)
-    profiled_results = profile_models(backend, models, mode='predbuild', save_name=f"profiled_{kernel_type}.json")
+    profiled_results = profile_models(backend, models, mode='predbuild', metrics=metrics, save_name=f"profiled_{kernel_type}.json")
     return profiled_results
 
 
-def build_predictor_for_kernel(kernel_type, backend, init_sample_num = 1000, finegrained_sample_num = 10, iteration = 5, error_threshold = 0.1):
+def build_predictor_for_kernel(kernel_type, backend, init_sample_num = 1000, finegrained_sample_num = 10, iteration = 5, error_threshold = 0.1, predict_label = "latency"):
     """
     Build latency predictor for given kernel. This method contains three main steps:
     1. sample kernel configs and profile kernel model based on configs;
@@ -171,6 +174,9 @@ def build_predictor_for_kernel(kernel_type, backend, init_sample_num = 1000, fin
     iteration (int, optional): the iteration for adaptive sampler. Defaults to 5.
 
     error_threshold (float, optional): the threshold of large error. Defaults to 0.2.
+
+    predict_label (str): the predicting label to build kernel predictor. Defaults to "latency"
+
     """
     from nn_meter.builder.kernel_predictor_builder import build_predictor_by_data
     ws_mode_path = builder_config.get('MODEL_DIR', 'predbuild')
@@ -181,7 +187,7 @@ def build_predictor_for_kernel(kernel_type, backend, init_sample_num = 1000, fin
 
     # use current sampled data to build regression model, and locate data with large errors in testset
     predictor, acc10, error_configs = build_predictor_by_data(kernel_type, kernel_data, backend, error_threshold=error_threshold, mark='prior',
-                                                              save_path=os.path.join(ws_mode_path, "results"))
+                                                              save_path=os.path.join(ws_mode_path, "results"), predict_label=predict_label)
     logging.keyinfo(f'Iteration 0: acc10 {acc10}, error_configs number: {len(error_configs)}')
 
     for i in range(1, iteration):
@@ -192,7 +198,7 @@ def build_predictor_for_kernel(kernel_type, backend, init_sample_num = 1000, fin
         # merge finegrained data with previous data and build new regression model
         kernel_data = merge_prev_info(new_info=new_kernel_data, prev_info=kernel_data)
         predictor, acc10, error_configs = build_predictor_by_data(kernel_type, kernel_data, backend, error_threshold=error_threshold, mark='finegrained{i}',
-                                                                  save_path=os.path.join(ws_mode_path, "results"))
+                                                                  save_path=os.path.join(ws_mode_path, "results"), predict_label=predict_label)
         logging.keyinfo(f'Iteration {i}: acc10 {acc10}, error_configs number: {len(error_configs)}')
 
     return predictor, kernel_data
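Putting the pieces together, a hedged end-to-end sketch of `build_predictor_for_kernel` with the new keyword; the top-level import path is assumed, the backend name is a placeholder, and the sample counts mirror the defaults:

```python
# Sketch: build an energy predictor rather than a latency one.
from nn_meter.builder import build_predictor_for_kernel  # assumed export path

predictor, kernel_data = build_predictor_for_kernel(
    "conv_bn_relu",
    backend="tflite_cpu",     # placeholder backend name
    init_sample_num=1000,
    finegrained_sample_num=10,
    iteration=5,
    error_threshold=0.1,
    predict_label="energy",   # profiling must have collected this metric
)
```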
@@ -47,8 +47,8 @@ def merge_prev_info(new_info, info_save_path = None, prev_info = None):
     return prev_info
 
 
-def save_profiled_results(models, save_path, detail):
+def save_profiled_results(models, save_path, detail, metrics = ["latency"]):
     new_models = merge_prev_info(new_info=models, info_save_path=save_path)
     from .backend_meta.utils import dump_profiled_results
     with open(save_path, 'w') as fp:
-        json.dump(dump_profiled_results(new_models, detail=detail), fp, indent=4)
+        json.dump(dump_profiled_results(new_models, detail=detail, metrics=metrics), fp, indent=4)
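Finally, a minimal sketch of the save path with the threaded-through metric list; the models dict and values are made up, and the `Latency` import mirrors the module used inside `save_profiled_results`:

```python
# Sketch: persist profiled models, keeping both latency and energy (made-up values).
from nn_meter.builder.backend_meta.utils import Latency

models = {
    "conv_bn_relu": {
        "id_0": {"latency": Latency(1.2, 0.1), "energy": 3.5},  # hypothetical
    }
}
save_profiled_results(models, "profiled_results.json", detail=False,
                      metrics=["latency", "energy"])
# Latency is serialized via str(); other listed metrics are stored as-is.
```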