# Be sure to have git-lfs installed (https://git-lfs.com)
git lfs install
git clone https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-english

# To clone the repo without large files - just their pointers,
# prepend git clone with the following env var:
GIT_LFS_SKIP_SMUDGE=1
Jonatasgrosman/wav2vec2-large-xlsr-53-english model: Difference between revisions
No edit summary |
|||
Line 168: | Line 168: | ||
}} | }} | ||
|-|Conversational= | |-|Conversational= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 199: | Line 200: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 231: | Line 233: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
|-|Feature Extraction= | |-|Feature Extraction= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 263: | Line 267: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 295: | Line 300: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
|-|Fill-Mask= | |-|Fill-Mask= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 327: | Line 334: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 359: | Line 367: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
|-|Image Classification= | |-|Image Classification= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 391: | Line 401: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 423: | Line 434: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
|-|Question Answering= | |-|Question Answering= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 455: | Line 468: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 487: | Line 501: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
|-|Summarization= | |-|Summarization= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 519: | Line 535: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 551: | Line 568: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
|-|Table Question Answering= | |-|Table Question Answering= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 583: | Line 602: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 615: | Line 635: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
|-|Text Classification= | |-|Text Classification= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 647: | Line 669: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 679: | Line 702: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
|-|Text Generation= | |-|Text Generation= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 711: | Line 736: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 743: | Line 769: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
|-|Text2Text Generation= | |-|Text2Text Generation= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 775: | Line 803: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 807: | Line 836: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
|-|Token Classification= | |-|Token Classification= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 839: | Line 870: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 871: | Line 903: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
|-|Translation= | |-|Translation= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 903: | Line 937: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 935: | Line 970: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
|-|Zero-Shot Classification= | |-|Zero-Shot Classification= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 967: | Line 1,004: | ||
}) | }) | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
from sagemaker.huggingface import HuggingFaceModel | from sagemaker.huggingface import HuggingFaceModel | ||
Line 999: | Line 1,037: | ||
}) | }) | ||
</pre> | </pre> | ||
}} | |||
</tabber> | </tabber> | ||
Line 1,013: | Line 1,052: | ||
<tabber> | <tabber> | ||
|-|Causal Language Modeling= | |-|Causal Language Modeling= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,047: | Line 1,087: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,083: | Line 1,124: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
}} | |||
|-|Masked Language Modeling= | |-|Masked Language Modeling= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,118: | Line 1,161: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,154: | Line 1,198: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
}} | |||
|-|Question Answering= | |-|Question Answering= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,189: | Line 1,235: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,225: | Line 1,272: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
}} | |||
|-|Summarization= | |-|Summarization= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,260: | Line 1,309: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,296: | Line 1,346: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
}} | |||
|-|Text Classification= | |-|Text Classification= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,331: | Line 1,383: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,367: | Line 1,420: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
}} | |||
|-|Token Classification= | |-|Token Classification= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,402: | Line 1,457: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,438: | Line 1,494: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
}} | |||
|-|Translation= | |-|Translation= | ||
{{#tag:tabber| | |||
AWS= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,473: | Line 1,531: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
{{!}}-{{!}} | |||
Local Machine= | |||
<pre> | <pre> | ||
import sagemaker | import sagemaker | ||
Line 1,509: | Line 1,568: | ||
huggingface_estimator.fit() | huggingface_estimator.fit() | ||
</pre> | </pre> | ||
}} | |||
</tabber> | </tabber> | ||
==Model Card== | ==Model Card== |
Revision as of 19:41, 4 May 2023
Hugging Face
Name
wav2vec2-large-xlsr-53-english
User / Organization
Type
Library
Language
License
The jonatasgrosman/wav2vec2-large-xlsr-53-english model is an Audio model used for Automatic Speech Recognition.
Clone Model Repository
# Be sure to have git-lfs installed (https://git-lfs.com)
git lfs install
git clone git@hf.co:jonatasgrosman/wav2vec2-large-xlsr-53-english

# To clone the repo without large files - just their pointers,
# prepend git clone with the following env var:
GIT_LFS_SKIP_SMUDGE=1
Hugging Face Transformers Library
from transformers import AutoProcessor, AutoModelForCTC

processor = AutoProcessor.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
model = AutoModelForCTC.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
Deployment
Inference API
import requests

API_URL = "https://api-inference.huggingface.co/models/jonatasgrosman/wav2vec2-large-xlsr-53-english"
headers = {"Authorization": f"Bearer {API_TOKEN}"}

def query(filename):
    with open(filename, "rb") as f:
        data = f.read()
    response = requests.post(API_URL, headers=headers, data=data)
    return response.json()

output = query("sample1.flac")
async function query(filename) { const data = fs.readFileSync(filename); const response = await fetch( "https://api-inference.huggingface.co/models/jonatasgrosman/wav2vec2-large-xlsr-53-english", { headers: { Authorization: "Bearer {API_TOKEN}" }, method: "POST", body: data, } ); const result = await response.json(); return result; } query("sample1.flac").then((response) => { console.log(JSON.stringify(response)); });
curl https://api-inference.huggingface.co/models/jonatasgrosman/wav2vec2-large-xlsr-53-english \
    -X POST \
    --data-binary '@sample1.flac' \
    -H "Authorization: Bearer {API_TOKEN}"
Amazon SageMaker
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'automatic-speech-recognition' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'automatic-speech-recognition' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'conversational' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'conversational' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'feature-extraction' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'feature-extraction' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'fill-mask' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'fill-mask' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'image-classification' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'image-classification' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'question-answering' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'question-answering' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'summarization' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'summarization' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'table-question-answering' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'table-question-answering' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'text-classification' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'text-classification' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'text-generation' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'text-generation' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'text2text-generation' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'text2text-generation' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'token-classification' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'token-classification' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'translation' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import boto3 iam_client = boto3.client('iam') role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'translation' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
from sagemaker.huggingface import HuggingFaceModel import sagemaker role = sagemaker.get_execution_role() # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english', 'HF_TASK':'zero-shot-classification' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( transformers_version='4.17.0', pytorch_version='1.10.2', py_version='py38', env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, # number of instances instance_type='ml.m5.xlarge' # ec2 instance type ) predictor.predict({ 'inputs': "sample1.flac" })
# Deploy jonatasgrosman/wav2vec2-large-xlsr-53-english to a SageMaker
# real-time endpoint from a local machine: the execution role is looked
# up through IAM instead of the in-SageMaker helper.
from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']

# Hub Model configuration. https://huggingface.co/models
# NOTE: this is a Wav2Vec2 CTC model — the task it supports is
# 'automatic-speech-recognition' (the original snippet used
# 'zero-shot-classification', which this model cannot serve; the
# audio-file input below confirms ASR).
hub = {
    'HF_MODEL_ID': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'HF_TASK': 'automatic-speech-recognition',
}

# Create Hugging Face Model class.
huggingface_model = HuggingFaceModel(
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    env=hub,
    role=role,
)

# Deploy model to SageMaker Inference.
predictor = huggingface_model.deploy(
    initial_instance_count=1,        # number of instances
    instance_type='ml.m5.xlarge',    # EC2 instance type
)

# Input is an audio file; the endpoint returns the transcription.
predictor.predict({
    'inputs': "sample1.flac",
})
Spaces
# Spin up a Gradio demo for the model, backed by the Hugging Face
# Inference API ("models/..." prefix loads a hosted model, not local code).
import gradio as gr

demo = gr.Interface.load("models/jonatasgrosman/wav2vec2-large-xlsr-53-english")
demo.launch()
Training
Amazon SageMaker
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# (run from inside SageMaker, where an execution role is attached).
import sagemaker
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
role = sagemaker.get_execution_role()

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the causal-LM script
# (run_clm.py) in the original snippet cannot fine-tune it. Use the
# speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# from a local machine: the execution role is looked up through IAM.
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the causal-LM script
# (run_clm.py) in the original snippet cannot fine-tune it. Use the
# speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# (run from inside SageMaker, where an execution role is attached).
import sagemaker
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
role = sagemaker.get_execution_role()

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the masked-LM script
# (run_mlm.py) in the original snippet cannot fine-tune it. Use the
# speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# from a local machine: the execution role is looked up through IAM.
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the masked-LM script
# (run_mlm.py) in the original snippet cannot fine-tune it. Use the
# speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# (run from inside SageMaker, where an execution role is attached).
import sagemaker
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
role = sagemaker.get_execution_role()

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the question-answering
# script (run_qa.py) in the original snippet cannot fine-tune it. Use the
# speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# from a local machine: the execution role is looked up through IAM.
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the question-answering
# script (run_qa.py) in the original snippet cannot fine-tune it. Use the
# speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# (run from inside SageMaker, where an execution role is attached).
import sagemaker
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
role = sagemaker.get_execution_role()

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the seq2seq
# summarization script (run_summarization.py) in the original snippet
# cannot fine-tune it. Use the speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# from a local machine: the execution role is looked up through IAM.
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the seq2seq
# summarization script (run_summarization.py) in the original snippet
# cannot fine-tune it. Use the speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# (run from inside SageMaker, where an execution role is attached).
import sagemaker
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
role = sagemaker.get_execution_role()

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the GLUE
# text-classification script (run_glue.py) in the original snippet cannot
# fine-tune it. Use the speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# from a local machine: the execution role is looked up through IAM.
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the GLUE
# text-classification script (run_glue.py) in the original snippet cannot
# fine-tune it. Use the speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# (run from inside SageMaker, where an execution role is attached).
import sagemaker
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
role = sagemaker.get_execution_role()

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the token-classification
# script (run_ner.py) in the original snippet cannot fine-tune it. Use the
# speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# from a local machine: the execution role is looked up through IAM.
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the token-classification
# script (run_ner.py) in the original snippet cannot fine-tune it. Use the
# speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# (run from inside SageMaker, where an execution role is attached).
import sagemaker
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
role = sagemaker.get_execution_role()

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the seq2seq translation
# script (run_translation.py) in the original snippet cannot fine-tune it.
# Use the speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()
# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker
# from a local machine: the execution role is looked up through IAM.
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Gets role for executing training job.
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']

hyperparameters = {
    'model_name_or_path': 'jonatasgrosman/wav2vec2-large-xlsr-53-english',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# Git configuration to download our fine-tuning script.
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.17.0'}

# Creates Hugging Face estimator.
# NOTE: Wav2Vec2 is a CTC speech-recognition model; the seq2seq translation
# script (run_translation.py) in the original snippet cannot fine-tune it.
# Use the speech-recognition CTC example instead.
huggingface_estimator = HuggingFace(
    entry_point='run_speech_recognition_ctc.py',
    source_dir='./examples/pytorch/speech-recognition',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.17.0',
    pytorch_version='1.10.2',
    py_version='py38',
    hyperparameters=hyperparameters,
)

# Starting the train job.
huggingface_estimator.fit()