Bert-base-uncased model: Difference between revisions

From AI Wiki
No edit summary
(Replaced content with "{{Model infobox | hugging-face-uri = bert-base-uncased | creator = | type = Natural Language Processing | task = Fill-Mask | library = PyTorch, TensorFlow, JAX, Rust, Core ML, Safetensors, Transformers | dataset = bookcorpus, wikipedia | language = English | paper = arxiv:1810.04805 | license = apache-2.0 | related-to = bert, exbert, AutoTrain Compatible | all-tags = Fill-Mask, PyTorch, TensorFlow, JAX, Rust, Core ML, Safetensors, Transformers, bookcorpus, wikipedia...")
Tag: Replaced
 
Line 4: Line 4:
| type = Natural Language Processing
| type = Natural Language Processing
| task = Fill-Mask
| task = Fill-Mask
| library = PyTorch, TensorFlow, JAX, Rust, Safetensors, Transformers
| library = PyTorch, TensorFlow, JAX, Rust, Core ML, Safetensors, Transformers
| dataset = bookcorpus, wikipedia
| dataset = bookcorpus, wikipedia
| language = English
| language = English
Line 10: Line 10:
| license = apache-2.0
| license = apache-2.0
| related-to = bert, exbert, AutoTrain Compatible
| related-to = bert, exbert, AutoTrain Compatible
| all-tags = Fill-Mask, PyTorch, TensorFlow, JAX, Rust, Safetensors, Transformers, bookcorpus, wikipedia, English, bert, exbert, AutoTrain Compatible, arxiv:1810.04805, License: apache-2.0
| all-tags = Fill-Mask, PyTorch, TensorFlow, JAX, Rust, Core ML, Safetensors, Transformers, bookcorpus, wikipedia, English, bert, exbert, AutoTrain Compatible, arxiv:1810.04805, License: apache-2.0
| all-lang-tags = English
| all-lang-tags = English
}}
}}
Line 98: Line 98:


===Amazon SageMaker===
===Amazon SageMaker===
<tabber>
|-|Automatic Speech Recognition=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'automatic-speech-recognition'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'automatic-speech-recognition'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
|-|Conversational=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'conversational'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'conversational'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
|-|Feature Extraction=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'feature-extraction'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'feature-extraction'
}
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
|-|Fill-Mask=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'fill-mask'
}


# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)


# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'fill-mask'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.26.0',
pytorch_version='1.13.1',
py_version='py39',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
"inputs": "The answer to the universe is [MASK].",
})
</pre>
}}
|-|Image Classification=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'image-classification'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'image-classification'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
|-|Question Answering=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'question-answering'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'question-answering'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
|-|Summarization=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'summarization'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'summarization'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
|-|Table Question Answering=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'table-question-answering'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'table-question-answering'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
|-|Text Classification=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'text-classification'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'text-classification'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
|-|Text Generation=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'text-generation'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'text-generation'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
|-|Text2Text Generation=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'text2text-generation'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'text2text-generation'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
|-|Token Classification=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'token-classification'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'token-classification'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
|-|Translation=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'translation'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'translation'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
|-|Zero-Shot Classification=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'zero-shot-classification'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'bert-base-uncased',
'HF_TASK':'zero-shot-classification'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "The answer to the universe is [MASK]."
})
</pre>
}}
</tabber>


===Spaces===
===Spaces===
Line 1,048: Line 144:
==Training==
==Training==
===Amazon SageMaker===
===Amazon SageMaker===
<tabber>
|-|Causal Language Modeling=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/language-modeling
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_clm.py',
source_dir='./examples/pytorch/language-modeling',
instance_type='ml.p3.2xlarge',
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/language-modeling
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_clm.py',
source_dir='./examples/pytorch/language-modeling',
instance_type='ml.p3.2xlarge',
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Masked Language Modeling=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/language-modeling
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_mlm.py',
source_dir='./examples/pytorch/language-modeling',
instance_type='ml.p3.2xlarge',
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/language-modeling
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_mlm.py',
source_dir='./examples/pytorch/language-modeling',
instance_type='ml.p3.2xlarge',
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Question Answering=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/question-answering
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_qa.py',
source_dir='./examples/pytorch/question-answering',
instance_type='ml.p3.2xlarge',
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/question-answering
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_qa.py',
source_dir='./examples/pytorch/question-answering',
instance_type='ml.p3.2xlarge',
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Summarization=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/seq2seq
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_summarization.py',
source_dir='./examples/pytorch/seq2seq',
instance_type='ml.p3.2xlarge',
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/seq2seq
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_summarization.py',
source_dir='./examples/pytorch/seq2seq',
instance_type='ml.p3.2xlarge',
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Text Classification=
{{#tag:tabber|
AWS=
<pre>
# Fine-tune bert-base-uncased for text classification on Amazon SageMaker.
# This variant runs inside SageMaker (notebook/Studio), where an execution
# role is already attached to the environment.
import sagemaker
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
# SageMaker uploads the contents of /opt/ml/model to S3 when training ends
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/text-classification
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
# training script is fetched from the transformers repo at the pinned branch
entry_point='run_glue.py',
source_dir='./examples/pytorch/text-classification',
instance_type='ml.p3.2xlarge',  # single-GPU training instance
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
# Fine-tune bert-base-uncased for text classification on Amazon SageMaker,
# launched from a local machine: the role is resolved explicitly via IAM
# (replace the placeholder with a role that has SageMaker permissions).
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
# SageMaker uploads the contents of /opt/ml/model to S3 when training ends
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/text-classification
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_glue.py',
source_dir='./examples/pytorch/text-classification',
instance_type='ml.p3.2xlarge',  # single-GPU training instance
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Token Classification=
{{#tag:tabber|
AWS=
<pre>
# Fine-tune bert-base-uncased for token classification (NER) on Amazon
# SageMaker. This variant runs inside SageMaker (notebook/Studio), where an
# execution role is already attached to the environment.
import sagemaker
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
# SageMaker uploads the contents of /opt/ml/model to S3 when training ends
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/token-classification
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
# training script is fetched from the transformers repo at the pinned branch
entry_point='run_ner.py',
source_dir='./examples/pytorch/token-classification',
instance_type='ml.p3.2xlarge',  # single-GPU training instance
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
# Fine-tune bert-base-uncased for token classification (NER) on Amazon
# SageMaker, launched from a local machine: the role is resolved explicitly
# via IAM (replace the placeholder with a role with SageMaker permissions).
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
# SageMaker uploads the contents of /opt/ml/model to S3 when training ends
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/token-classification
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_ner.py',
source_dir='./examples/pytorch/token-classification',
instance_type='ml.p3.2xlarge',  # single-GPU training instance
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Translation=
{{#tag:tabber|
AWS=
<pre>
# Fine-tune bert-base-uncased with the Hugging Face translation example on
# Amazon SageMaker. This variant runs inside SageMaker (notebook/Studio),
# where an execution role is already attached to the environment.
import sagemaker
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
# SageMaker uploads the contents of /opt/ml/model to S3 when training ends
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/translation
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_translation.py',
# FIX: in transformers v4.17.0 run_translation.py lives under
# examples/pytorch/translation — examples/pytorch/seq2seq does not exist.
source_dir='./examples/pytorch/translation',
instance_type='ml.p3.2xlarge',
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
# Fine-tune bert-base-uncased with the Hugging Face translation example on
# Amazon SageMaker, launched from a local machine: the execution role is
# looked up explicitly through IAM instead of sagemaker.get_execution_role().
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
'model_name_or_path':'bert-base-uncased',
# SageMaker uploads the contents of /opt/ml/model to S3 when training ends
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/translation
}
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_translation.py',
# FIX: in transformers v4.17.0 run_translation.py lives under
# examples/pytorch/translation — examples/pytorch/seq2seq does not exist.
source_dir='./examples/pytorch/translation',
instance_type='ml.p3.2xlarge',
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
# starting the train job
huggingface_estimator.fit()
</pre>
}}
</tabber>


==Model Card==

Latest revision as of 01:03, 11 June 2023

Bert-base-uncased model is a Natural Language Processing model used for Fill-Mask.

Model Description

Clone Model Repository

# Clone the model repository over HTTPS.
#Be sure to have git-lfs installed (https://git-lfs.com)
git lfs install
git clone https://huggingface.co/bert-base-uncased
  
#To clone the repo without large files – just their pointers
#prepend git clone with the following env var:
GIT_LFS_SKIP_SMUDGE=1

# Clone the model repository over SSH (requires an SSH key registered with
# your Hugging Face account).
#Be sure to have git-lfs installed (https://git-lfs.com)
git lfs install
# FIX: the SSH remote was mangled by the wiki's e-mail obfuscation
# ("[email protected]"); the actual Hub SSH remote is git@hf.co:<repo>.
git clone git@hf.co:bert-base-uncased
  
#To clone the repo without large files – just their pointers
#prepend git clone with the following env var:
GIT_LFS_SKIP_SMUDGE=1

Hugging Face Transformers Library

# Load the bert-base-uncased tokenizer and masked-language-model head with
# the Transformers auto classes (weights are downloaded on first use).
from transformers import AutoTokenizer, AutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")

Deployment

Inference API

# Query the hosted Inference API for bert-base-uncased over HTTP.
import requests

API_URL = "https://api-inference.huggingface.co/models/bert-base-uncased"
# API_TOKEN must be a Hugging Face access token defined before this line.
headers = {"Authorization": f"Bearer {API_TOKEN}"}

def query(payload):
	"""POST the payload to the Inference API and return the decoded JSON."""
	response = requests.post(API_URL, headers=headers, json=payload)
	return response.json()
	
output = query({
	"inputs": "The answer to the universe is [MASK].",
})

// Query the hosted Inference API for bert-base-uncased from JavaScript.
// Replace {API_TOKEN} with a Hugging Face access token.
async function query(data) {
	const response = await fetch(
		"https://api-inference.huggingface.co/models/bert-base-uncased",
		{
			headers: { Authorization: "Bearer {API_TOKEN}" },
			method: "POST",
			body: JSON.stringify(data),
		}
	);
	// Decode the JSON body of the API response.
	const result = await response.json();
	return result;
}

query({"inputs": "The answer to the universe is [MASK]."}).then((response) => {
	console.log(JSON.stringify(response));
});

# Query the hosted Inference API with curl; replace {API_TOKEN} with a
# Hugging Face access token.
curl https://api-inference.huggingface.co/models/bert-base-uncased \
	-X POST \
	-d '{"inputs": "The answer to the universe is [MASK]."}' \
	-H "Authorization: Bearer {API_TOKEN}"

Amazon SageMaker

  import sagemaker
import boto3
from sagemaker.huggingface import HuggingFaceModel

# Resolve the IAM role: inside SageMaker use the attached execution role;
# otherwise (e.g. a local machine) look up 'sagemaker_execution_role' in IAM.
try:
	role = sagemaker.get_execution_role()
except ValueError:
	iam = boto3.client('iam')
	role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']

# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'bert-base-uncased',
	'HF_TASK':'fill-mask'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.26.0',
	pytorch_version='1.13.1',
	py_version='py39',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

# Run a fill-mask prediction against the deployed endpoint.
predictor.predict({
	"inputs": "The answer to the universe is [MASK].",
})
  

Spaces

# Launch a ready-made Gradio demo UI backed by the hosted model
# (as used on Hugging Face Spaces).
import gradio as gr

gr.Interface.load("models/bert-base-uncased").launch()

Training

Amazon SageMaker

Model Card

Comments

Loading comments...