<tabber>
|-|HTTPS=
<pre>
#Be sure to have git-lfs installed (https://git-lfs.com)
git lfs install
git clone https://huggingface.co/SZTAKI-HLT/hubert-base-cc

#To clone the repo without large files – just their pointers
#prepend git clone with the following env var:
GIT_LFS_SKIP_SMUDGE=1
</pre>
|-|SSH=
<pre>
#Be sure to have git-lfs installed (https://git-lfs.com)
git lfs install
git clone git@hf.co:SZTAKI-HLT/hubert-base-cc

#To clone the repo without large files – just their pointers
#prepend git clone with the following env var:
GIT_LFS_SKIP_SMUDGE=1
</pre>
</tabber>

==Hugging Face Transformers Library==
<pre>
from transformers import AutoModel

model = AutoModel.from_pretrained("SZTAKI-HLT/hubert-base-cc")
</pre>

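The snippet above only loads the encoder. As a minimal usage sketch (not part of the original card, and assuming the checkpoint ships a masked-language-modeling head and the standard [MASK] token), the matching tokenizer can be loaded and a masked Hungarian sentence scored like this:
<pre>
from transformers import AutoTokenizer, AutoModelForMaskedLM
import torch

tokenizer = AutoTokenizer.from_pretrained("SZTAKI-HLT/hubert-base-cc")
model = AutoModelForMaskedLM.from_pretrained("SZTAKI-HLT/hubert-base-cc")

# illustrative Hungarian sentence; [MASK] marks the token to predict
inputs = tokenizer("Budapest Magyarország [MASK].", return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# take the highest-scoring token at the masked position
mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
print(tokenizer.decode(logits[0, mask_pos].argmax(dim=-1)))
</pre>
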
==Deployment==
===Inference API===
<tabber>
</tabber>

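The Inference API tab is empty in the source. As a hedged illustration (not from the original card), a fill-mask request against the hosted Inference API could look like the following; the HF_API_TOKEN environment variable and the example sentence are assumptions:
<pre>
import os
import requests

API_URL = "https://api-inference.huggingface.co/models/SZTAKI-HLT/hubert-base-cc"
headers = {"Authorization": f"Bearer {os.environ['HF_API_TOKEN']}"}

# illustrative Hungarian fill-mask input
payload = {"inputs": "Budapest Magyarország [MASK]."}

response = requests.post(API_URL, headers=headers, json=payload)
print(response.json())
</pre>
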
===Amazon SageMaker===
<tabber>
|-|Automatic Speech Recognition=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Conversational=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'conversational'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'conversational'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Feature Extraction=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'feature-extraction'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'feature-extraction'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Fill-Mask=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'fill-mask'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'fill-mask'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Image Classification=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'image-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'image-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Question Answering=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'question-answering'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'question-answering'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Summarization=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'summarization'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'summarization'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Table Question Answering=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'table-question-answering'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'table-question-answering'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Text Classification=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'text-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'text-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Text Generation=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'text-generation'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'text-generation'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Text2Text Generation=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'text2text-generation'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'text2text-generation'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Token Classification=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'token-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'token-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Translation=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'translation'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'translation'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
|-|Zero-Shot Classification=
{{#tag:tabber|
AWS=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'zero-shot-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
    'HF_MODEL_ID': 'SZTAKI-HLT/hubert-base-cc',
    'HF_TASK': 'zero-shot-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
    initial_instance_count=1,  # number of instances
    instance_type='ml.m5.xlarge'  # ec2 instance type
)

predictor.predict({
    'inputs': "...",  # No input example has been defined for this model task.
})
</pre>
}}
</tabber>

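Once one of the endpoints above is up, it can be exercised and then torn down from the same session. A hedged sketch for the fill-mask deployment (the Hungarian example sentence is an assumption; predict and delete_endpoint are standard SageMaker Predictor methods):
<pre>
# query the deployed fill-mask endpoint with an illustrative Hungarian sentence
result = predictor.predict({
    "inputs": "Budapest Magyarország [MASK]."
})
print(result)

# remove the endpoint when finished to stop incurring charges
predictor.delete_endpoint()
</pre>
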
===Spaces===

==Training==
===Amazon SageMaker===
<tabber>
|-|Causal Language Modeling=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/language-modeling
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_clm.py',
    source_dir='./examples/pytorch/language-modeling',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/language-modeling
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_clm.py',
    source_dir='./examples/pytorch/language-modeling',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Masked Language Modeling=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/language-modeling
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_mlm.py',
    source_dir='./examples/pytorch/language-modeling',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/language-modeling
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_mlm.py',
    source_dir='./examples/pytorch/language-modeling',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
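Since huBERT is a masked-language model, the run_mlm.py recipe above is the closest fit for continued pretraining. A hedged sketch of a fuller hyperparameter dictionary (the file paths, batch size and epoch count are illustrative placeholders, not values from the original card):
<pre>
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model',
    # illustrative data and training settings for run_mlm.py
    'train_file': '/opt/ml/input/data/train/train.txt',
    'validation_file': '/opt/ml/input/data/test/validation.txt',
    'do_train': True,
    'do_eval': True,
    'per_device_train_batch_size': 16,
    'num_train_epochs': 3,
}
</pre>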
|-|Question Answering=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/question-answering
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_qa.py',
    source_dir='./examples/pytorch/question-answering',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/question-answering
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_qa.py',
    source_dir='./examples/pytorch/question-answering',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Summarization=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/seq2seq
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_summarization.py',
    source_dir='./examples/pytorch/seq2seq',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/seq2seq
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_summarization.py',
    source_dir='./examples/pytorch/seq2seq',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Text Classification=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/text-classification
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_glue.py',
    source_dir='./examples/pytorch/text-classification',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/text-classification
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_glue.py',
    source_dir='./examples/pytorch/text-classification',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Token Classification=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/token-classification
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_ner.py',
    source_dir='./examples/pytorch/token-classification',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/token-classification
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_ner.py',
    source_dir='./examples/pytorch/token-classification',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
|-|Translation=
{{#tag:tabber|
AWS=
<pre>
import sagemaker
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/seq2seq
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_translation.py',
    source_dir='./examples/pytorch/seq2seq',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
{{!}}-{{!}}
Local Machine=
<pre>
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
    'model_name_or_path': 'SZTAKI-HLT/hubert-base-cc',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/seq2seq
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_translation.py',
    source_dir='./examples/pytorch/seq2seq',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters
)

# starting the train job
huggingface_estimator.fit()
</pre>
}}
</tabber>
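
None of the snippets above pass training data to fit(). As a hedged sketch (the S3 URIs and channel names are placeholders, not values from the original card), input channels can be supplied and the fine-tuned model deployed from the same estimator:
<pre>
# illustrative S3 locations for the training and validation data
data = {
    'train': 's3://my-bucket/hubert/train',
    'test': 's3://my-bucket/hubert/test',
}

# start the training job with explicit input channels
huggingface_estimator.fit(data)

# deploy the fine-tuned model to a real-time endpoint
predictor = huggingface_estimator.deploy(
    initial_instance_count=1,
    instance_type='ml.m5.xlarge'
)
</pre>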