From bc109ae5b8674ce6247abca649ba28c903979be3 Mon Sep 17 00:00:00 2001
From: abhishek thakur
Date: Thu, 28 Jan 2021 21:10:46 +0100
Subject: [PATCH] pin_memory -> dataloader_pin_memory (#9874)

---
 src/transformers/trainer.py       | 6 +++---
 src/transformers/training_args.py | 6 ++++--
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py
index 3e62db769..8ca84beef 100755
--- a/src/transformers/trainer.py
+++ b/src/transformers/trainer.py
@@ -485,7 +485,7 @@ class Trainer:
             collate_fn=self.data_collator,
             drop_last=self.args.dataloader_drop_last,
             num_workers=self.args.dataloader_num_workers,
-            pin_memory=self.args.pin_memory,
+            pin_memory=self.args.dataloader_pin_memory,
         )
 
     def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
@@ -523,7 +523,7 @@ class Trainer:
             collate_fn=self.data_collator,
             drop_last=self.args.dataloader_drop_last,
             num_workers=self.args.dataloader_num_workers,
-            pin_memory=self.args.pin_memory,
+            pin_memory=self.args.dataloader_pin_memory,
         )
 
     def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
@@ -550,7 +550,7 @@ class Trainer:
             batch_size=self.args.eval_batch_size,
             collate_fn=self.data_collator,
             drop_last=self.args.dataloader_drop_last,
-            pin_memory=self.args.pin_memory,
+            pin_memory=self.args.dataloader_pin_memory,
         )
 
     def create_optimizer_and_scheduler(self, num_training_steps: int):
diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index e4e8aad96..71d525594 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -244,7 +244,7 @@ class TrainingArguments:
             When using distributed training, the value of the flag :obj:`find_unused_parameters` passed to
             :obj:`DistributedDataParallel`. Will default to :obj:`False` if gradient checkpointing is used, :obj:`True`
             otherwise.
-        pin_memory (:obj:`bool`, `optional`, defaults to :obj:`True`)):
+        dataloader_pin_memory (:obj:`bool`, `optional`, defaults to :obj:`True`)):
             Whether you want to pin memory in data loaders or not. Will default to :obj:`True`.
     """
@@ -438,7 +438,9 @@ class TrainingArguments:
             "`DistributedDataParallel`."
         },
     )
-    pin_memory: bool = field(default=True, metadata={"help": "Whether or not to pin memory for data loaders."})
+    dataloader_pin_memory: bool = field(
+        default=True, metadata={"help": "Whether or not to pin memory for DataLoader."}
+    )
     _n_gpu: int = field(init=False, repr=False, default=-1)
 
     def __post_init__(self):
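
For reference, a minimal usage sketch of the renamed flag follows. It is not part of the patch: the output directory and worker count are placeholder values chosen for illustration; only dataloader_pin_memory and dataloader_num_workers come from TrainingArguments itself. The flag is read from TrainingArguments and forwarded by Trainer to torch.utils.data.DataLoader as pin_memory.

# Usage sketch (not part of the patch): after this change the argument is named
# dataloader_pin_memory on TrainingArguments; Trainer passes it through to
# torch.utils.data.DataLoader as pin_memory. Values below are placeholders.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",              # placeholder output directory
    dataloader_pin_memory=False,   # e.g. disable pinned memory on CPU-only machines
    dataloader_num_workers=2,      # related DataLoader setting, shown for context
)
print(args.dataloader_pin_memory)  # False; defaults to True when omitted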