# Usage examples: loading Hugging Face Transformers components through torch.hub.
# The entry points ('config', 'tokenizer', 'modelWithLMHead', ...) are declared in
# the huggingface/transformers repository's hubconf.py; the third positional
# argument is either a hub model id (downloaded and cached) or a local directory
# produced by `save_pretrained(...)`. Each call performs network/disk I/O.
import torch

# Configuration: download from the model hub and cache it, or load one that was
# saved locally with `config.save_pretrained('./test/bert_saved_model/')`.
config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased')
config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/')

# Tokenizer: download the vocabulary and cache it, or load one that was saved
# locally with `tokenizer.save_pretrained('./test/bert_saved_model/')`.
tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', 'bert-base-uncased')
tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', './test/bert_saved_model/')

# Language-modeling head model: download weights + config, or load from a local
# directory saved with `model.save_pretrained('./test/bert_model/')`.
model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased')
model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './test/bert_model/')
# Extra keyword arguments update the configuration at load time. NOTE: the
# correct parameter name is `output_attentions` (plural), not `output_attention`.
model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased', output_attentions=True)

# Sequence-classification model: same three loading patterns as above.
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased')
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './test/bert_model/')
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attentions=True)

# Question-answering model: same three loading patterns as above.
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased')
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './test/bert_model/')
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attentions=True)