| type = Natural Language Processing
| task = Fill-Mask
| library = PyTorch, TensorFlow, JAX, Rust, Core ML, Safetensors, Transformers
| dataset = bookcorpus, wikipedia
| language = English
| paper = arxiv:1810.04805
| license = apache-2.0
| related-to = bert, exbert, AutoTrain Compatible
| all-tags = Fill-Mask, PyTorch, TensorFlow, JAX, Rust, Core ML, Safetensors, Transformers, bookcorpus, wikipedia, English, bert, exbert, AutoTrain Compatible, arxiv:1810.04805, License: apache-2.0
| all-lang-tags = English
}}
==Model Description==
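The infobox above tags this checkpoint for the Fill-Mask task. A minimal local sketch of that task with the transformers pipeline API, assuming transformers and a PyTorch (or TensorFlow) backend are installed:

<pre>
from transformers import pipeline

# load the checkpoint into a fill-mask pipeline
unmasker = pipeline('fill-mask', model='bert-base-uncased')

# each prediction carries the filled-in sequence, the candidate token and its score
for prediction in unmasker("The answer to the universe is [MASK]."):
    print(prediction['sequence'], round(prediction['score'], 4))
</pre>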
==Comments==
<comments />
==Clone Model Repository==
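The repository is normally cloned with Git LFS (git lfs install followed by git clone https://huggingface.co/bert-base-uncased), since the weight files are stored with LFS. An equivalent sketch in Python, assuming the huggingface_hub package is installed:

<pre>
from huggingface_hub import snapshot_download

# download (or reuse the local cache of) the full bert-base-uncased repository
local_path = snapshot_download(repo_id="bert-base-uncased")
print(local_path)
</pre>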
===Amazon SageMaker===
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFaceModel

try:
    role = sagemaker.get_execution_role()
except ValueError:
    iam = boto3.client('iam')
    role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']

# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'bert-base-uncased',
    'HF_TASK': 'fill-mask'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.26.0',
    pytorch_version='1.13.1',
    py_version='py39',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,     # number of instances
    instance_type='ml.m5.xlarge'  # EC2 instance type
)

predictor.predict({
    "inputs": "The answer to the universe is [MASK].",
})
</pre>
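For a fill-mask endpoint, the predict call above returns a list of candidate completions with scores, assuming the default Hugging Face inference toolkit output format. A short sketch of reading that response and tearing the endpoint down afterwards:

<pre>
# inspect the candidates returned by the endpoint
result = predictor.predict({"inputs": "The answer to the universe is [MASK]."})
for candidate in result:
    print(candidate["token_str"], round(candidate["score"], 4))

# delete the endpoint when finished to stop incurring charges
predictor.delete_endpoint()
</pre>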
===Spaces===
==Training==
===Amazon SageMaker===
<tabber>
|-|Causal Language Modeling=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/language-modeling
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_clm.py',
    source_dir='./examples/pytorch/language-modeling',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/language-modeling
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_clm.py',
    source_dir='./examples/pytorch/language-modeling',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
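huggingface_estimator.fit() as written trains on whatever dataset the script's hyperparameters point to. If the training data lives in S3 instead, the usual SageMaker pattern is to pass named input channels to fit(); a sketch in which the S3 URIs are placeholders:

<pre>
# pass your own data as S3 channels; the paths below are placeholders
huggingface_estimator.fit({
    'train': 's3://my-bucket/bert/train',
    'test': 's3://my-bucket/bert/test'
})
</pre>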
|-|Masked Language Modeling=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/language-modeling
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_mlm.py',
    source_dir='./examples/pytorch/language-modeling',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/language-modeling
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_mlm.py',
    source_dir='./examples/pytorch/language-modeling',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
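Masked language modeling is the closest match to this checkpoint's Fill-Mask task. A sketch of a fuller hyperparameters dictionary for run_mlm.py, using the public wikitext dataset as an example; the values are illustrative, not tuned:

<pre>
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'dataset_name': 'wikitext',
    'dataset_config_name': 'wikitext-2-raw-v1',
    'do_train': True,
    'do_eval': True,
    'num_train_epochs': 3,
    'per_device_train_batch_size': 8,
    'output_dir': '/opt/ml/model'
}
</pre>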
|-|Question Answering=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/question-answering
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_qa.py',
    source_dir='./examples/pytorch/question-answering',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/question-answering
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_qa.py',
    source_dir='./examples/pytorch/question-answering',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Summarization=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/summarization
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_summarization.py',
    source_dir='./examples/pytorch/summarization',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/summarization
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_summarization.py',
    source_dir='./examples/pytorch/summarization',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Text Classification=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/text-classification
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_glue.py',
    source_dir='./examples/pytorch/text-classification',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/text-classification
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_glue.py',
    source_dir='./examples/pytorch/text-classification',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
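run_glue.py additionally expects either a GLUE task name or a dataset to be specified. A sketch of the hyperparameters for the SST-2 sentiment task; the values are illustrative, not tuned:

<pre>
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'task_name': 'sst2',
    'do_train': True,
    'do_eval': True,
    'max_seq_length': 128,
    'num_train_epochs': 3,
    'output_dir': '/opt/ml/model'
}
</pre>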
|-|Token Classification=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/token-classification
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_ner.py',
    source_dir='./examples/pytorch/token-classification',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/token-classification
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_ner.py',
    source_dir='./examples/pytorch/token-classification',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Translation=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/translation
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_translation.py',
    source_dir='./examples/pytorch/translation',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'bert-base-uncased',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/translation
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_translation.py',
    source_dir='./examples/pytorch/translation',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
</tabber>
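After fit() completes, the trained artifacts land in S3 (exposed as huggingface_estimator.model_data) and the estimator can be deployed straight to a real-time endpoint. A minimal sketch:

<pre>
# S3 location of the trained model.tar.gz
print(huggingface_estimator.model_data)

# deploy the freshly trained model to a SageMaker endpoint
predictor = huggingface_estimator.deploy(
    initial_instance_count=1,
    instance_type='ml.m5.xlarge'
)
predictor.predict({"inputs": "The answer to the universe is [MASK]."})
</pre>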
==Model Card==