Add AppInsights ServiceName Implementation (#3555)

* Add service name implementation

* indentation fix

* f-string indentation fix

* Evaluator built-in image bump to v6

* indentation fix - no additional line

---------

Co-authored-by: Shruti Iyer <iyershruti@microsoft.com>
apeddauppari authored 2024-11-01 22:29:37 +05:30; committed by GitHub
Parent 953eb97cb3
Commit 4d545071e8
No key found matching this signature
GPG key ID: B5690EEEBB952194
4 changed files with 21 additions and 10 deletions

View File

@@ -126,8 +126,9 @@ parser.add_argument("--evaluators", type=str)
 parser.add_argument("--evaluator_name_id_map", type=str)
 args = parser.parse_args()
 
-rai_evaluators = ['HateUnfairnessEvaluator', 'Sexual-Content-Evaluator', 'Hate-and-Unfairness-Evaluator',
-                  'Violent-Content-Evaluator', 'Self-Harm-Related-Content-Evaluator']
+rai_evaluators = ['Sexual-Content-Evaluator', 'Hate-and-Unfairness-Evaluator',
+                  'Violent-Content-Evaluator', 'Self-Harm-Related-Content-Evaluator',
+                  'Groundedness-Pro-Evaluator', 'Protected-Material-Evaluator', 'Indirect-Attack-Evaluator']
 
 if __name__ == '__main__':
     copy_evaluator_files(args)

View File

@@ -138,11 +138,13 @@ def run_evaluation(command_line_args, evaluators, evaluator_configs):
     rai_evaluators = [
-        "HateUnfairnessEvaluator",
         "Sexual-Content-Evaluator",
         "Hate-and-Unfairness-Evaluator",
         "Violent-Content-Evaluator",
         "Self-Harm-Related-Content-Evaluator",
+        "Groundedness-Pro-Evaluator",
+        "Protected-Material-Evaluator",
+        "Indirect-Attack-Evaluator",
     ]

View File

@@ -23,6 +23,7 @@ def get_args():
                         default="./preprocessed_data_output.jsonl")
     parser.add_argument("--evaluated_data", type=str, dest="evaluated_data", default="./evaluated_data_output.jsonl")
     parser.add_argument("--evaluators", type=str, dest="evaluators")
+    parser.add_argument("--service_name", type=str, dest="service_name", default="evaluation.app")
     args, _ = parser.parse_known_args()
     return vars(args)

View File

@@ -30,6 +30,7 @@ def get_args():
     parser.add_argument("--evaluated_data", type=str, dest="evaluated_data", default="./evaluated_data_output.jsonl")
     parser.add_argument("--connection_string", type=str, dest="connection_string", default=None)
     parser.add_argument("--sampling_rate", type=str, dest="sampling_rate", default="1")
+    parser.add_argument("--service_name", type=str, dest="service_name", default="evaluation.app")
     args, _ = parser.parse_known_args()
     return vars(args)
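
A minimal sketch of the argument parsing added above, assuming the script is invoked with the new flag; the sample value "my-rag-app" is illustrative, only the argument names and defaults come from this hunk:

# Hedged sketch of the new --service_name flag; values are illustrative.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--connection_string", type=str, dest="connection_string", default=None)
parser.add_argument("--sampling_rate", type=str, dest="sampling_rate", default="1")
parser.add_argument("--service_name", type=str, dest="service_name", default="evaluation.app")
args, _ = parser.parse_known_args(["--service_name", "my-rag-app"])
print(vars(args))
# {'connection_string': None, 'sampling_rate': '1', 'service_name': 'my-rag-app'}

When the flag is omitted, the service name falls back to the default "evaluation.app".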
@@ -48,11 +49,11 @@ def configure_logging(args) -> LoggerProvider:
     return provider
 
 
-def log_evaluation_event_single(trace_id, span_id, trace_flags, response_id, evaluation):
+def log_evaluation_event_single(trace_id, span_id, trace_flags, response_id, evaluation, service_name):
     """Log evaluation event."""
     for name, value in evaluation.items():
-        attributes = {"event.name": "gen_ai.evaluation.{name}", "gen_ai.evaluation.score": json.dumps(value),
-                      "gen_ai.response_id": response_id}
+        attributes = {"event.name": f"gen_ai.evaluation.{name}", "gen_ai.evaluation.score": json.dumps(value),
+                      "gen_ai.response_id": response_id, "service.name": service_name}
         body = f"gen_ai.evaluation for response_id: {response_id}"
         event = opentelemetry.sdk._logs.LogRecord(
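
A minimal sketch of the attributes now attached to each evaluation log record, assuming a single evaluation entry; the response id and score values are illustrative, the attribute keys come from the hunk above:

import json

service_name = "evaluation.app"
response_id = "resp-123"          # illustrative response id
evaluation = {"relevance": 4}     # illustrative evaluator output

for name, value in evaluation.items():
    attributes = {
        "event.name": f"gen_ai.evaluation.{name}",   # now a real f-string, not the literal "{name}"
        "gen_ai.evaluation.score": json.dumps(value),
        "gen_ai.response_id": response_id,
        "service.name": service_name,                # new: lets App Insights group records by service
    }
    print(attributes)
# {'event.name': 'gen_ai.evaluation.relevance', 'gen_ai.evaluation.score': '4',
#  'gen_ai.response_id': 'resp-123', 'service.name': 'evaluation.app'}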
@@ -79,13 +80,14 @@ def log_evaluation_event(row) -> None:
     trace_flags = TraceFlags(TraceFlags.SAMPLED)
     response_id = row.get("gen_ai_response_id", "")
     evaluation_results = row.get("evaluation", {})
+    service_name = row.get("service_name", "evaluation.app")
     if isinstance(evaluation_results, dict):
         evaluation_results = [evaluation_results]
     for evaluation in evaluation_results:
-        log_evaluation_event_single(trace_id, span_id, trace_flags, response_id, evaluation)
+        log_evaluation_event_single(trace_id, span_id, trace_flags, response_id, evaluation, service_name)
 
 
-def get_combined_data(preprocessed_data, evaluated_data):
+def get_combined_data(preprocessed_data, evaluated_data, service_name):
     """Combine preprocessed and evaluated data."""
     logger.info("Combining preprocessed and evaluated data.")
     preprocessed_df = pd.read_json(preprocessed_data, lines=True)
@@ -95,14 +97,19 @@ def get_combined_data(preprocessed_data, evaluated_data):
         evaluation_data.append(json.loads(line))
     preprocessed_df["evaluation"] = evaluation_data
+    preprocessed_df["service_name"] = service_name
     return preprocessed_df
 
 
 def run(args):
     """Entry point of model prediction script."""
-    logger.info(f"Sampling Rate: {args['sampling_rate']}, Connection String: {args['connection_string']}")
+    logger.info(
+        f"Sampling Rate: {args['sampling_rate']}, Connection String: {args['connection_string']}, "
+        f"Service Name: {args['service_name']}"
+    )
     provider = configure_logging(args)
-    data = get_combined_data(args["preprocessed_data"], args["evaluated_data"])
+    data = get_combined_data(args["preprocessed_data"], args["evaluated_data"],
+                             args["service_name"])
     for _, row in data.iterrows():
         log_evaluation_event(row)
     provider.force_flush()
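
A rough sketch of what get_combined_data now produces, assuming tiny illustrative JSONL inputs; the file contents and the "my-rag-app" value are made up, while the column names, gen_ai_response_id key, and pandas calls come from the hunks above:

import json
import pandas as pd

# Illustrative inputs shaped like the preprocessed and evaluated JSONL files.
with open("preprocessed_data_output.jsonl", "w") as f:
    f.write(json.dumps({"gen_ai_response_id": "resp-123"}) + "\n")
with open("evaluated_data_output.jsonl", "w") as f:
    f.write(json.dumps({"relevance": 4}) + "\n")

preprocessed_df = pd.read_json("preprocessed_data_output.jsonl", lines=True)
evaluation_data = []
with open("evaluated_data_output.jsonl") as f:
    for line in f:
        evaluation_data.append(json.loads(line))

preprocessed_df["evaluation"] = evaluation_data
preprocessed_df["service_name"] = "my-rag-app"   # new column, read back by log_evaluation_event
print(preprocessed_df[["gen_ai_response_id", "evaluation", "service_name"]])

Each row then carries its service name alongside the evaluation results, so log_evaluation_event can stamp service.name onto every emitted record without any extra lookups.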