This commit is contained in:
Daniela Massiceti 2022-12-16 08:48:40 +00:00
Parent 2794707d6e
Commit cbcc07087e
3 changed files: 95 additions and 16 deletions

View file

@@ -190,7 +190,7 @@ class Learner:
if (step+1) % self.args.num_test_tasks == 0:
self.test_evaluator.set_current_user(task_dict["task_id"])
_,_,_,current_video_stats = self.test_evaluator.get_mean_stats(current_user=True)
-print_and_log(self.logfile, f'{self.args.test_set} user {task_dict["task_id"]} ({self.test_evaluator.current_user+1}/{len(self.test_queue)}) stats: {stats_to_str(current_video_stats)} avg. #context clips/task: {np.mean(num_context_clips_per_task)} avg. #target clips/task: {np.mean(num_target_clips_per_task)}')
+print_and_log(self.logfile, f'{self.args.test_set} user {task_dict["task_id"]} ({self.test_evaluator.current_user+1}/{len(self.test_queue)}) stats: {stats_to_str(current_video_stats)} avg. #context clips/task: {np.mean(num_context_clips_per_task):.0f} avg. #target clips/task: {np.mean(num_target_clips_per_task):.0f}')
if (step+1) < num_test_tasks:
num_context_clips_per_task, num_target_clips_per_task = [], []
self.test_evaluator.next_user()
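
Note on the change above: np.mean(...) returns a NumPy float. In the learner files the averages were previously interpolated with no format spec, so the log printed full float precision (e.g. 9.333333333333334); in the notebook further down the spec was :d, which raises ValueError: Unknown format code 'd' for object of type 'float' (the exact traceback captured in the notebook outputs below). The :.0f spec rounds the float and prints it without decimals in both places. A minimal sketch of the failure and the fix, with hypothetical clip counts:

    import numpy as np

    # Hypothetical per-task clip counts, standing in for the lists
    # the evaluation loop accumulates.
    num_context_clips_per_task = [8, 9, 11]
    avg = np.mean(num_context_clips_per_task)  # NumPy float, not an int

    print(f"avg. #context clips/task: {avg}")        # unformatted: 9.333333333333334
    try:
        print(f"avg. #context clips/task: {avg:d}")  # old notebook code
    except ValueError as e:
        print(e)  # Unknown format code 'd' for object of type 'float'
    print(f"avg. #context clips/task: {avg:.0f}")    # fixed: 9

Casting with int(avg) would truncate toward zero rather than round, so :.0f keeps the logged averages closer to the true means.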

View file

@@ -25,14 +25,22 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Dataset already saved at /hdd/data/orbitdataset/orbit_benchmark/orbit_benchmark_a_224/test.\n"
]
}
],
"source": [
"from pathlib import Path\n",
"\n",
"DATA_ROOT = \"orbit_benchmark\"\n",
"DATA_SPLIT = \"validation\"\n",
"DATA_ROOT = \"/hdd/data/orbitdataset/orbit_benchmark/orbit_benchmark_a_224\"\n",
"DATA_SPLIT = \"test\"\n",
"validation_path = Path(DATA_ROOT, DATA_SPLIT)\n",
"from pathlib import Path\n",
"\n",
@@ -62,9 +70,45 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Creating data queue...\n",
"Filtering context frames ['no_object_not_present_issue'].\n",
"Filtering target frames ['no_object_not_present_issue'].\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Loading test users from /hdd/data/orbitdataset/orbit_benchmark/orbit_benchmark_a_224/test: 100%|██████████| 17/17 [00:07<00:00, 2.29it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Min context frames/obj: 561 (P642 'tumble dryer')\n",
"Min target frames/obj: 76 (P198 'ipod in wallet')\n",
"Max context frames/obj: 9095 (P421 'table fan')\n",
"Max target frames/obj: 3500 (P901 'house door')\n",
"Loaded data summary: 17 users, 158 objects, 1195 videos (#context: 898, #target: 297)\n",
"Created data queue, queue uses 2 workers.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n"
]
}
],
"source": [
"from data.queues import UserEpisodicDatasetQueue\n",
"\n",
@@ -104,9 +148,17 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Checkpoint already exists at orbit_pretrained_checkpoints/orbit_cluve_protonets_cosine_vit_b_32_224_lite.pth.\n"
]
}
],
"source": [
"checkpoint_path = Path(\"orbit_pretrained_checkpoints\", \"orbit_cluve_protonets_cosine_vit_b_32_224_lite.pth\")\n",
"\n",
@@ -121,9 +173,17 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Instance of SingleStepFewShotRecogniser created on device cuda:0.\n"
]
}
],
"source": [
"import torch\n",
"from model.few_shot_recognisers import SingleStepFewShotRecogniser\n",
@@ -167,9 +227,28 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running evaluation...\n"
]
},
{
"ename": "ValueError",
"evalue": "Unknown format code 'd' for object of type 'float'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/tmp/ipykernel_89556/704779747.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[0mevaluator\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_current_user\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtask\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"task_id\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mcurrent_video_stats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mevaluator\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_mean_stats\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcurrent_user\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 56\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"User {task['task_id']} ({evaluator.current_user+1}/{len(data_queue)}) {get_stats_str(current_video_stats)}, avg #context clips/task: {np.mean(num_context_clips_per_task):d}, avg #target clips/task: {np.mean(num_target_clips_per_task):d}\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 57\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m<\u001b[0m \u001b[0mnum_test_tasks\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[0mnum_context_clips_per_task\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mValueError\u001b[0m: Unknown format code 'd' for object of type 'float'"
]
}
],
"source": [
"import numpy as np\n",
"from typing import Dict, Tuple\n",
@@ -226,7 +305,7 @@
" if (step+1) % data_queue.num_tasks == 0:\n",
" evaluator.set_current_user(task[\"task_id\"])\n",
" _,_,_,current_video_stats = evaluator.get_mean_stats(current_user=True)\n",
" print(f\"User {task['task_id']} ({evaluator.current_user+1}/{len(data_queue)}) {get_stats_str(current_video_stats)}, avg #context clips/task: {np.mean(num_context_clips_per_task):d}, avg #target clips/task: {np.mean(num_target_clips_per_task):d}\")\n",
" print(f\"User {task['task_id']} ({evaluator.current_user+1}/{len(data_queue)}) {get_stats_str(current_video_stats)}, avg #context clips/task: {np.mean(num_context_clips_per_task):.0f}, avg #target clips/task: {np.mean(num_target_clips_per_task):.0f}\")\n",
" if (step+1) < num_test_tasks:\n",
" num_context_clips_per_task = []\n",
" num_target_clips_per_task = []\n",

View file

@@ -277,7 +277,7 @@ class Learner:
if (step+1) % self.args.num_val_tasks == 0:
self.validation_evaluator.set_current_user(task_dict["task_id"])
_,_,_,current_video_stats = self.validation_evaluator.get_mean_stats(current_user=True)
-print_and_log(self.logfile, f'validation user {task_dict["task_id"]} ({self.validation_evaluator.current_user+1}/{len(self.validation_queue)}) stats: {stats_to_str(current_video_stats)} avg. #context clips/task: {np.mean(num_context_clips_per_task)} avg. #target clips/task: {np.mean(num_target_clips_per_task)}')
+print_and_log(self.logfile, f'validation user {task_dict["task_id"]} ({self.validation_evaluator.current_user+1}/{len(self.validation_queue)}) stats: {stats_to_str(current_video_stats)} avg. #context clips/task: {np.mean(num_context_clips_per_task):.0f} avg. #target clips/task: {np.mean(num_target_clips_per_task):.0f}')
if (step+1) < num_val_tasks:
num_context_clips_per_task, num_target_clips_per_task = [], []
self.validation_evaluator.next_user()
@@ -344,7 +344,7 @@ class Learner:
if (step+1) % self.args.num_test_tasks == 0:
self.test_evaluator.set_current_user(task_dict["task_id"])
_,_,_,current_video_stats = self.test_evaluator.get_mean_stats(current_user=True)
-print_and_log(self.logfile, f'{self.args.test_set} user {task_dict["task_id"]} ({self.test_evaluator.current_user+1}/{len(self.test_queue)}) stats: {stats_to_str(current_video_stats)} avg. #context clips/task: {np.mean(num_context_clips_per_task)} avg. #target clips/task: {np.mean(num_target_clips_per_task)}')
+print_and_log(self.logfile, f'{self.args.test_set} user {task_dict["task_id"]} ({self.test_evaluator.current_user+1}/{len(self.test_queue)}) stats: {stats_to_str(current_video_stats)} avg. #context clips/task: {np.mean(num_context_clips_per_task):.0f} avg. #target clips/task: {np.mean(num_target_clips_per_task):.0f}')
if (step+1) < num_test_tasks:
num_context_clips_per_task, num_target_clips_per_task = [], []
self.test_evaluator.next_user()