Backend and OD Notebook Dataset fixes (#2360)
* pandas operation fix
* cohort fix
* windows test fix
* backend & dataset fixes
* reverted test file
* python lint fixes
* eslint updates
* auto lint fixes
* auto lint fixes
* test update
* removed file
This commit is contained in:
Parent: d7cd9b1efd
Commit: 40af9df23b
@@ -79,6 +79,7 @@
   "feature_value",
   "global_effects",
   "identity_feature_name",
+  "image_dimensions",
   "is_large_data_scenario",
   "local_effects",
   "local_importance",

@@ -99,6 +100,7 @@
   "model_type",
   "n_samples",
   "num_rows",
+  "object_detection_labels",
   "p_value",
   "plot_bgcolor",
   "point_estimate",

@@ -72,13 +72,14 @@ export const visionApplications: IApplications = <const>{
       class_names: fridgeObjectDetection.class_names,
       feature_names: fridgeObjectDetection.feature_names,
       features: fridgeObjectDetection.features,
-      imageDimensions: fridgeObjectDetection.imageDimensions,
+      image_dimensions: fridgeObjectDetection.image_dimensions,
       images: fridgeObjectDetection.images,
+      object_detection_labels:
+        fridgeObjectDetection.object_detection_labels,
       object_detection_predicted_y:
         fridgeObjectDetection.object_detection_predicted_y,
       object_detection_true_y:
         fridgeObjectDetection.object_detection_true_y,
-      objectDetectionLabels: fridgeObjectDetection.objectDetectionLabels,
       predicted_y: fridgeObjectDetection.predicted_y,
       target_column: fridgeObjectDetection.target_column,
       task_type: fridgeObjectDetection.task_type,

@@ -91,7 +91,7 @@ export const fridgeObjectDetection: IDataset = {
       3024
     ]
   ],
-  imageDimensions: [
+  image_dimensions: [
     [499, 666],
     [499, 666],
     [499, 666],

@@ -99,6 +99,33 @@ export const fridgeObjectDetection: IDataset = {
     [499, 666]
   ],
   images: fridgeObjectDetectionImages,
+  object_detection_labels: [
+    {
+      aggregate: "2 correct, 0 incorrect",
+      correct: "1 milk_bottle, 1 can",
+      incorrect: "(none)"
+    },
+    {
+      aggregate: "2 correct, 0 incorrect",
+      correct: "1 milk_bottle, 1 can",
+      incorrect: "(none)"
+    },
+    {
+      aggregate: "2 correct, 0 incorrect",
+      correct: "1 carton, 1 water_bottle",
+      incorrect: "(none)"
+    },
+    {
+      aggregate: "2 correct, 0 incorrect",
+      correct: "1 can, 1 milk_bottle",
+      incorrect: "(none)"
+    },
+    {
+      aggregate: "2 correct, 0 incorrect",
+      correct: "1 carton, 1 water_bottle",
+      incorrect: "(none)"
+    }
+  ],
   object_detection_predicted_y: [
     [
       [

@@ -173,33 +200,6 @@ export const fridgeObjectDetection: IDataset = {
       [2, 220, 130, 392, 505, 0]
     ]
   ],
-  objectDetectionLabels: [
-    {
-      aggregate: "2 correct, 0 incorrect",
-      correct: "1 milk_bottle, 1 can",
-      incorrect: "(none)"
-    },
-    {
-      aggregate: "2 correct, 0 incorrect",
-      correct: "1 milk_bottle, 1 can",
-      incorrect: "(none)"
-    },
-    {
-      aggregate: "2 correct, 0 incorrect",
-      correct: "1 carton, 1 water_bottle",
-      incorrect: "(none)"
-    },
-    {
-      aggregate: "2 correct, 0 incorrect",
-      correct: "1 can, 1 milk_bottle",
-      incorrect: "(none)"
-    },
-    {
-      aggregate: "2 correct, 0 incorrect",
-      correct: "1 carton, 1 water_bottle",
-      incorrect: "(none)"
-    }
-  ],
   predicted_y: [
     [1, 0, 1, 0],
     [1, 0, 1, 0],

@@ -31,7 +31,7 @@ When using the DPV2 run of the object detection ResponsibleAIVisionInsights dash
 
 3. Wrapped ML model returned for use
 
-4. The extracted data table is passed to the front end. The following three attributes are unique to the object detection scenario: `object_detection_true_y`, `object_detection_predicted_y`, and `imageDimensions`. See this [mock data](https://github.com/microsoft/responsible-ai-toolbox/blob/main/apps/dashboard/src/model-assessment-vision/__mock_data__/fridgeObjectDetection.ts) for an example.
+4. The extracted data table is passed to the front end. The following three attributes are unique to the object detection scenario: `object_detection_true_y`, `object_detection_predicted_y`, and `image_dimensions`. See this [mock data](https://github.com/microsoft/responsible-ai-toolbox/blob/main/apps/dashboard/src/model-assessment-vision/__mock_data__/fridgeObjectDetection.ts) for an example.
 
 5. The dashboard is generated
 
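For reference, a minimal sketch of the object-detection payload these three attributes describe. The values are illustrative (lifted from the mock data above), and the meaning of the six numbers per box is an assumption inferred from the mock arrays, not a documented contract:

# Hypothetical sketch of the OD-specific dashboard payload, in dict form.
od_payload = {
    # one list of boxes per image; each box is six numbers in the mock data,
    # e.g. [class index, xmin, ymin, xmax, ymax, ...] (layout assumed)
    "object_detection_true_y": [[[2, 220, 130, 392, 505, 0]]],
    "object_detection_predicted_y": [[[2, 220, 130, 392, 505, 0]]],
    # one dimension pair per image, used by the front end to scale boxes
    "image_dimensions": [[499, 666]],
}
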
@@ -48,8 +48,8 @@ export interface IDataset {
   index?: string[];
   object_detection_true_y?: number[][][];
   object_detection_predicted_y?: number[][][];
-  imageDimensions?: Array<[number, number]>;
-  objectDetectionLabels?: IObjectDetectionLabelType[];
+  image_dimensions?: Array<[number, number]>;
+  object_detection_labels?: IObjectDetectionLabelType[];
 }
 
 // TODO Remove DatasetSummary when possible

@@ -66,7 +66,7 @@ export function drawBox(
   colorCode: string,
   boxId: string
 ): void {
-  if (!dataset.imageDimensions) {
+  if (!dataset.image_dimensions) {
     return;
   }
 

@@ -125,7 +125,7 @@ export function drawBoundingBoxes(
 
   // Drawing bounding boxes for each ground truth object
   for (const [oidx, gtObject] of trueY.entries()) {
-    if (!dataset.imageDimensions) {
+    if (!dataset.image_dimensions) {
       break;
     }
     const objectLabelIndex = gtObject[0] - 1;

@@ -137,7 +137,7 @@ export function drawBoundingBoxes(
       editor,
       dataset,
       scaleCoordinate,
-      dataset.imageDimensions[oidx],
+      dataset.image_dimensions[oidx],
       gtObject,
       annotation,
       theme.palette.green,

@@ -147,7 +147,7 @@ export function drawBoundingBoxes(
 
   // Draws bounding boxes for each predicted object
   for (const [oidx, predObject] of predictedY.entries()) {
-    if (!dataset.imageDimensions) {
+    if (!dataset.image_dimensions) {
       break;
     }
     const objectLabelIndex = predObject[0] - 1;

@@ -161,7 +161,7 @@ export function drawBoundingBoxes(
       editor,
       dataset,
       scaleCoordinate,
-      dataset.imageDimensions[oidx],
+      dataset.image_dimensions[oidx],
       predObject,
       annotation,
       theme.palette.magenta,

@@ -88,9 +88,9 @@ export function preprocessData(
   const odPredictedY = typeof y === "undefined" ? defVal : y;
   const x = dataset.object_detection_true_y?.[index];
   const odTrueY = typeof x === "undefined" ? defVal : x;
-  const i = dataset.objectDetectionLabels?.[index].incorrect;
-  const c = dataset.objectDetectionLabels?.[index].correct;
-  const a = dataset.objectDetectionLabels?.[index].aggregate;
+  const i = dataset.object_detection_labels?.[index].incorrect;
+  const c = dataset.object_detection_labels?.[index].correct;
+  const a = dataset.object_detection_labels?.[index].aggregate;
   const odIncorrect = typeof i === "undefined" ? defVal : i;
   const odCorrect = typeof c === "undefined" ? defVal : c;
   const odAggregate = typeof a === "undefined" ? defVal : a;

@@ -45,7 +45,6 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "import sys\n",
     "from zipfile import ZipFile\n",
     "import numpy as np\n",
     "import pandas as pd\n",

@@ -53,10 +52,7 @@
     "matplotlib.use('Agg')\n",
     "import matplotlib.pyplot as plt\n",
     "from responsibleai_vision.common.constants import ImageColumns\n",
     "import json\n",
     "from raiutils.common.retries import retry_function\n",
-    "\n",
-    "import json\n",
-    "import os\n",
     "import xml.etree.ElementTree as ET\n",
     "\n",

@@ -135,14 +131,15 @@
     " \n",
     "    labels = load_fridge_object_detection_dataset_labels()\n",
     " \n",
-    "    # get all file names into a pandas dataframe with the labels\n",
-    "    data = pd.DataFrame(columns=[ImageColumns.IMAGE.value,\n",
-    "                                 ImageColumns.LABEL.value])\n",
+    "    feats = []\n",
     "    for i, file in enumerate(os.listdir(\"./data/odFridgeObjects/\" + \"images\")):\n",
     "        image_path = \"./data/odFridgeObjects/\" + \"images\" + \"/\" + file\n",
-    "        data = data.append({ImageColumns.IMAGE.value: image_path,\n",
-    "                            ImageColumns.LABEL.value: labels[i]}, # folder\n",
-    "                           ignore_index=True)\n",
+    "        feats.append({ImageColumns.IMAGE.value: image_path,\n",
+    "                      ImageColumns.LABEL.value: labels[i]})\n",
     " \n",
+    "    # get all file names into a pandas dataframe with the labels\n",
+    "    data = pd.DataFrame(feats, columns=[ImageColumns.IMAGE.value,\n",
+    "                                        ImageColumns.LABEL.value])\n",
+    " \n",
     " \n",
     "    return data"

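This is the "pandas operation fix" from the commit message: `DataFrame.append` was deprecated in pandas 1.4 and removed in pandas 2.0, so the notebook now accumulates row dicts in a list and builds the frame once. A self-contained sketch of the same pattern (column names and file paths here are hypothetical):

import pandas as pd

feats = []
for i, image_path in enumerate(["images/1.jpg", "images/2.jpg"]):  # hypothetical paths
    # accumulate one dict per row instead of calling data.append(...)
    feats.append({"image": image_path, "label": f"label_{i}"})

# build the frame once; this also avoids the per-call copy that made
# repeated DataFrame.append quadratic
data = pd.DataFrame(feats, columns=["image", "label"])
print(data)
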
@@ -171,7 +168,7 @@
     "            \"https://publictestdatasets.blob.core.windows.net/models/fastrcnn.pt\",\n",
     "            os.path.join(filepath))\n",
     "    else:\n",
-    "        print('Found' + filepath)\n",
+    "        print('Found ' + filepath)\n",
     "\n",
     "    return filepath"
    ]

@@ -185,12 +182,13 @@
     "#Loading in our pretrained model \n",
     "import torchvision\n",
     "from torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n",
+    "from torchvision.models.detection.faster_rcnn import FasterRCNN_ResNet50_FPN_Weights\n",
     "import torch\n",
     "import os\n",
     "\n",
     "def get_instance_segmentation_model(num_classes):\n",
     "    # load an instance segmentation model pre-trained on COCO\n",
-    "    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n",
+    "    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights=FasterRCNN_ResNet50_FPN_Weights.COCO_V1)\n",
     "    in_features = model.roi_heads.box_predictor.cls_score.in_features\n",
     "    # replace the pre-trained head with a new one\n",
     "    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n",

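The `pretrained=True` flag has been deprecated since torchvision 0.13 in favor of explicit weight enums, which is what the cell above adopts. Condensed from the diff into a runnable form (the num_classes value below is an assumption: four fridge classes plus background):

import torchvision
from torchvision.models.detection.faster_rcnn import (
    FasterRCNN_ResNet50_FPN_Weights,
    FastRCNNPredictor,
)

def get_instance_segmentation_model(num_classes):
    # load a detection model pre-trained on COCO via the explicit weights enum
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
        weights=FasterRCNN_ResNet50_FPN_Weights.COCO_V1)
    # replace the pre-trained box predictor head with a new one
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
    return model

model = get_instance_segmentation_model(num_classes=5)  # assumed: 4 classes + background
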
@@ -37,7 +37,7 @@ class Dataset:
     index: Optional[List[str]]
     object_detection_true_y: Optional[List]
     object_detection_predicted_y: Optional[List]
-    imageDimensions: Optional[List[List[int]]]
+    image_dimensions: Optional[List[List[int]]]
     object_detection_labels: Optional[List[Dict[str, str]]]
 
 

@@ -679,7 +679,7 @@ class RAIVisionInsights(RAIBaseInsights):
             encoded_images.append(b64)
 
         # passing to frontend to draw bounding boxes with the correct scale
-        dashboard_dataset.imageDimensions = image_dimensions
+        dashboard_dataset.image_dimensions = image_dimensions
 
         if len(encoded_images) > 0:
             dashboard_dataset.images = encoded_images

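The rename is load-bearing: the serialized attribute names must match the front-end `IDataset` fields exactly, so a camelCase `imageDimensions` on the Python side would no longer be picked up once the TypeScript interface reads `image_dimensions`. A minimal sketch of the aligned contract, simplified to the two renamed fields with illustrative values from the mock data:

from dataclasses import asdict, dataclass
from typing import Dict, List, Optional

@dataclass
class Dataset:
    # simplified subset of the backend Dataset schema in this commit
    image_dimensions: Optional[List[List[int]]] = None
    object_detection_labels: Optional[List[Dict[str, str]]] = None

dashboard_dataset = Dataset(
    image_dimensions=[[499, 666]],  # one dimension pair per image
    object_detection_labels=[{"aggregate": "2 correct, 0 incorrect",
                              "correct": "1 milk_bottle, 1 can",
                              "incorrect": "(none)"}],
)
# serialized keys now match the TypeScript IDataset fields
# (image_dimensions, object_detection_labels) instead of camelCase
print(asdict(dashboard_dataset))
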
@@ -745,12 +745,12 @@
 
         rendered_labels[_CORRECT] = ', '.join(
             f'{value} {key}' for key, value in
-            image_labels[_CORRECT].items())
+            image_labels[_CORRECT].items() if value > 0)
         if len(rendered_labels[_CORRECT]) == 0:
             rendered_labels[_CORRECT] = _NOLABEL
         rendered_labels[_INCORRECT] = ', '.join(
             f'{value} {key}' for key, value in
-            image_labels[_INCORRECT].items())
+            image_labels[_INCORRECT].items() if value > 0)
         if len(rendered_labels[_INCORRECT]) == 0:
             rendered_labels[_INCORRECT] = _NOLABEL
         rendered_labels[_AGGREGATE_LABEL] = \
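The `if value > 0` guard is the behavioral fix here: classes with a zero count no longer appear in the rendered label strings, and the existing `_NOLABEL` fallback covers the case where every class is filtered out. A standalone sketch of the logic (the `_NOLABEL` value is assumed from the "(none)" strings in the mock data):

_NOLABEL = '(none)'  # assumed sentinel, mirroring the fallback above

def render(counts):
    # join only the classes that actually occur in the image
    label = ', '.join(f'{value} {key}' for key, value in counts.items()
                      if value > 0)
    return label if label else _NOLABEL

print(render({'milk_bottle': 1, 'can': 1, 'carton': 0}))  # 1 milk_bottle, 1 can
print(render({'carton': 0}))                              # (none)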