pin_memory -> dataloader_pin_memory (#9874)

This commit is contained in:
abhishek thakur 2021-01-28 21:10:46 +01:00 committed by GitHub
Parent 80e4184fb0
Commit bc109ae5b8
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
2 changed files with 7 additions and 5 deletions

View file

@@ -485,7 +485,7 @@ class Trainer:
             collate_fn=self.data_collator,
             drop_last=self.args.dataloader_drop_last,
             num_workers=self.args.dataloader_num_workers,
-            pin_memory=self.args.pin_memory,
+            pin_memory=self.args.dataloader_pin_memory,
         )
 
     def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
@@ -523,7 +523,7 @@ class Trainer:
             collate_fn=self.data_collator,
             drop_last=self.args.dataloader_drop_last,
             num_workers=self.args.dataloader_num_workers,
-            pin_memory=self.args.pin_memory,
+            pin_memory=self.args.dataloader_pin_memory,
         )
 
     def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
@@ -550,7 +550,7 @@ class Trainer:
             batch_size=self.args.eval_batch_size,
             collate_fn=self.data_collator,
             drop_last=self.args.dataloader_drop_last,
-            pin_memory=self.args.pin_memory,
+            pin_memory=self.args.dataloader_pin_memory,
         )
 
     def create_optimizer_and_scheduler(self, num_training_steps: int):

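For context, a minimal sketch (not part of this commit) of what the forwarded pin_memory flag does on a plain torch.utils.data.DataLoader; the toy dataset and batch size are illustrative only.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Illustrative toy dataset; pin_memory=True asks the DataLoader to place each
# batch in page-locked (pinned) host memory, which enables faster asynchronous
# host-to-GPU copies via tensor.to("cuda", non_blocking=True).
dataset = TensorDataset(torch.randn(8, 4))
loader = DataLoader(dataset, batch_size=2, pin_memory=True)

for (batch,) in loader:
    print(batch.is_pinned())  # True when a CUDA device is available to pin against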
View file

@@ -244,7 +244,7 @@ class TrainingArguments:
             When using distributed training, the value of the flag :obj:`find_unused_parameters` passed to
             :obj:`DistributedDataParallel`. Will default to :obj:`False` if gradient checkpointing is used, :obj:`True`
             otherwise.
-        pin_memory (:obj:`bool`, `optional`, defaults to :obj:`True`)):
+        dataloader_pin_memory (:obj:`bool`, `optional`, defaults to :obj:`True`)):
             Whether you want to pin memory in data loaders or not. Will default to :obj:`True`.
     """
@@ -438,7 +438,9 @@ class TrainingArguments:
             "`DistributedDataParallel`."
         },
     )
-    pin_memory: bool = field(default=True, metadata={"help": "Whether or not to pin memory for data loaders."})
+    dataloader_pin_memory: bool = field(
+        default=True, metadata={"help": "Whether or not to pin memory for DataLoader."}
+    )
     _n_gpu: int = field(init=False, repr=False, default=-1)
 
     def __post_init__(self):
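For reference, a brief usage sketch of the renamed argument; the output_dir value is illustrative, and nothing else about the training setup is implied by this commit.

from transformers import TrainingArguments

# The flag is now spelled dataloader_pin_memory; the old `pin_memory` name is gone.
args = TrainingArguments(
    output_dir="output",          # illustrative path
    dataloader_pin_memory=False,  # e.g. disable pinning for CPU-only runs
)
print(args.dataloader_pin_memory)  # False; Trainer will build its DataLoaders with pin_memory=False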