Jonatasgrosman/wav2vec2-large-xlsr-53-english model: Difference between revisions

From AI Wiki
(Created page with "{{Model infobox | hugging-face-uri = jonatasgrosman/wav2vec2-large-xlsr-53-english | creator = | type = Audio | task = Automatic Speech Recognition | library = PyTorch, JAX, Safetensors, Transformers | dataset = common_voice, mozilla-foundation/common_voice_6_0 | language = English | paper = | related-to = wav2vec2, audio, hf-asr-leaderboard, mozilla-foundation/common_voice_6_0, robust-speech-event, speech, xlsr-fine-tuning-week | license = apache-2.0 | all-tags = Auto...")
 
No edit summary
 
(9 intermediate revisions by 2 users not shown)
Line 8: Line 8:
| language = English
| language = English
| paper =  
| paper =  
| related-to = wav2vec2, audio, hf-asr-leaderboard, mozilla-foundation/common_voice_6_0, robust-speech-event, speech, xlsr-fine-tuning-week
| license = apache-2.0
| license = apache-2.0
| related-to = wav2vec2, audio, hf-asr-leaderboard, mozilla-foundation/common_voice_6_0, robust-speech-event, speech, xlsr-fine-tuning-week, Eval Results
| all-tags = Automatic Speech Recognition, PyTorch, JAX, Safetensors, Transformers, common_voice, mozilla-foundation/common_voice_6_0, English, wav2vec2, audio, hf-asr-leaderboard, mozilla-foundation/common_voice_6_0, robust-speech-event, speech, xlsr-fine-tuning-week, Eval Results, License: apache-2.0
| all-tags = Automatic Speech Recognition, PyTorch, JAX, Safetensors, Transformers, common_voice, mozilla-foundation/common_voice_6_0, English, wav2vec2, audio, hf-asr-leaderboard, mozilla-foundation/common_voice_6_0, robust-speech-event, speech, xlsr-fine-tuning-week, Eval Results, License: apache-2.0
| all-lang-tags = English
| all-lang-tags = English
}}
}}
==Model Description==


==Clone Model Repository==
==Clone Model Repository==
===HTTPS===
<tabber>
|-|HTTPS=
<pre>
<pre>
#Be sure to have git-lfs installed (https://git-lfs.com)
#Be sure to have git-lfs installed (https://git-lfs.com)
Line 26: Line 29:
</pre>
</pre>
    
    
===SSH===
|-|SSH=
<pre>
<pre>
#Be sure to have git-lfs installed (https://git-lfs.com)
#Be sure to have git-lfs installed (https://git-lfs.com)
Line 36: Line 39:
GIT_LFS_SKIP_SMUDGE=1
GIT_LFS_SKIP_SMUDGE=1
</pre>
</pre>
 
</tabber>


==Hugging Face Transformers Library==
==Hugging Face Transformers Library==
Line 46: Line 49:
model = AutoModelForCTC.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
model = AutoModelForCTC.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
</pre>
</pre>


==Deployment==
==Deployment==
===Inference API===
===Inference API===
====Python====
<tabber>
|-|Python=
<pre>
<pre>
import requests
import requests
Line 65: Line 68:
output = query("sample1.flac")
output = query("sample1.flac")
</pre>
</pre>
====JavaScript====
|-|JavaScript=
<pre>
<pre>
async function query(filename) {
async function query(filename) {
Line 85: Line 88:
});
});
</pre>
</pre>
====cURL====
|-|cURL=
<pre>
<pre>
curl https://api-inference.huggingface.co/models/jonatasgrosman/wav2vec2-large-xlsr-53-english \
curl https://api-inference.huggingface.co/models/jonatasgrosman/wav2vec2-large-xlsr-53-english \
Line 93: Line 96:


</pre>
</pre>
</tabber>


===Amazon SageMaker===
===Amazon SageMaker===
====Automatic Speech Recognition====
<tabber>
=====AWS=====
|-|Automatic Speech Recognition=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 105: Line 111:
hub = {
hub = {
'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
'HF_TASK':'image-classification'
'HF_TASK':'automatic-speech-recognition'
}
}


Line 127: Line 133:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 137: Line 144:
hub = {
hub = {
'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
'HF_TASK':'image-classification'
'HF_TASK':'automatic-speech-recognition'
}
}


Line 159: Line 166:
})
})
</pre>
</pre>
====Conversational====
}}
=====AWS=====
|-|Conversational=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 191: Line 200:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 223: Line 233:
})
})
</pre>
</pre>
====Feature Extraction====
}}
=====AWS=====
|-|Feature Extraction=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 255: Line 267:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
OLD MODEL INFOBOX
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
'HF_TASK':'feature-extraction'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "sample1.flac"
})
</pre>
</pre>
====Fill-Mask====
}}
=====AWS=====
|-|Fill-Mask=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 291: Line 334:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 323: Line 367:
})
})
</pre>
</pre>
====Image Classification====
}}
=====AWS=====
|-|Image Classification=
{{#tag:tabber|
AWS=
<pre>
<pre>
OLD MODEL INFOBOX
from sagemaker.huggingface import HuggingFaceModel
import sagemaker
 
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
'HF_TASK':'image-classification'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "sample1.flac"
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 360: Line 434:
})
})
</pre>
</pre>
====Question Answering====
}}
=====AWS=====
|-|Question Answering=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 392: Line 468:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 424: Line 501:
})
})
</pre>
</pre>
====Summarization====
}}
=====AWS=====
|-|Summarization=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 456: Line 535:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 488: Line 568:
})
})
</pre>
</pre>
====Table Question Answering====
}}
=====AWS=====
|-|Table Question Answering=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 520: Line 602:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 552: Line 635:
})
})
</pre>
</pre>
====Text Classification====
}}
=====AWS=====
|-|Text Classification=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 584: Line 669:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
OLD MODEL INFOBOX
from sagemaker.huggingface import HuggingFaceModel
import boto3
 
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
'HF_TASK':'text-classification'
}
 
# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
env=hub,
role=role,
)
 
# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
initial_instance_count=1, # number of instances
instance_type='ml.m5.xlarge' # ec2 instance type
)
 
predictor.predict({
'inputs': "sample1.flac"
})
</pre>
</pre>
====Text Generation====
}}
=====AWS=====
|-|Text Generation=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 620: Line 736:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 652: Line 769:
})
})
</pre>
</pre>
====Text2Text Generation====
}}
=====AWS=====
|-|Text2Text Generation=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 684: Line 803:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 716: Line 836:
})
})
</pre>
</pre>
====Token Classification====
}}
=====AWS=====
|-|Token Classification=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 748: Line 870:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 780: Line 903:
})
})
</pre>
</pre>
====Translation====
}}
=====AWS=====
|-|Translation=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 812: Line 937:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 844: Line 970:
})
})
</pre>
</pre>
====Zero-Shot Classification====
}}
=====AWS=====
|-|Zero-Shot Classification=
{{#tag:tabber|
AWS=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 876: Line 1,004:
})
})
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.huggingface import HuggingFaceModel
Line 908: Line 1,037:
})
})
</pre>
</pre>
}}
</tabber>


===Spaces===
===Spaces===
Line 915: Line 1,046:
gr.Interface.load("models/jonatasgrosman/wav2vec2-large-xlsr-53-english").launch()
gr.Interface.load("models/jonatasgrosman/wav2vec2-large-xlsr-53-english").launch()
</pre>
</pre>


==Training==
==Training==
===Amazon SageMaker===
===Amazon SageMaker===
====Causal Language Modeling====
<tabber>
=====AWS=====
|-|Causal Language Modeling=
{{#tag:tabber|
AWS=
<pre>
<pre>
import sagemaker
import sagemaker
Line 954: Line 1,086:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
import sagemaker
import sagemaker
Line 990: Line 1,123:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
====Masked Language Modeling====
}}
=====AWS=====
|-|Masked Language Modeling=
{{#tag:tabber|
AWS=
<pre>
<pre>
import sagemaker
import sagemaker
Line 1,025: Line 1,160:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
import sagemaker
import sagemaker
Line 1,061: Line 1,197:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
====Question Answering====
}}
=====AWS=====
|-|Question Answering=
{{#tag:tabber|
AWS=
<pre>
<pre>
import sagemaker
import sagemaker
Line 1,096: Line 1,234:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
import sagemaker
import sagemaker
Line 1,132: Line 1,271:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
====Summarization====
}}
=====AWS=====
|-|Summarization=
{{#tag:tabber|
AWS=
<pre>
<pre>
import sagemaker
import sagemaker
Line 1,167: Line 1,308:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
import sagemaker
import sagemaker
Line 1,203: Line 1,345:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
====Text Classification====
}}
=====AWS=====
|-|Text Classification=
{{#tag:tabber|
AWS=
<pre>
<pre>
import sagemaker
import sagemaker
Line 1,238: Line 1,382:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
import sagemaker
import sagemaker
Line 1,274: Line 1,419:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
====Token Classification====
}}
=====AWS=====
|-|Token Classification=
{{#tag:tabber|
AWS=
<pre>
<pre>
import sagemaker
import sagemaker
Line 1,309: Line 1,456:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
{{Model infobox
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace
 
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
'output_dir':'/opt/ml/model'
# add your remaining hyperparameters
# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/token-classification
}
 
# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}
 
# creates Hugging Face estimator
huggingface_estimator = HuggingFace(
entry_point='run_ner.py',
source_dir='./examples/pytorch/token-classification',
instance_type='ml.p3.2xlarge',
instance_count=1,
role=role,
git_config=git_config,
transformers_version='4.17.0',
pytorch_version='1.10.2',
py_version='py38',
hyperparameters = hyperparameters
)
 
# starting the train job
huggingface_estimator.fit()
</pre>
</pre>
====Translation====
}}
=====AWS=====
|-|Translation=
{{#tag:tabber|
AWS=
<pre>
<pre>
import sagemaker
import sagemaker
Line 1,348: Line 1,530:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
=====Local Machine=====
{{!}}-{{!}}
Local Machine=
<pre>
<pre>
import sagemaker
import sagemaker
Line 1,384: Line 1,567:
huggingface_estimator.fit()
huggingface_estimator.fit()
</pre>
</pre>
}}
</tabber>


==Model Card==


==Model Card==
==Comments==
<comments />

Latest revision as of 03:33, 23 May 2023

Jonatasgrosman/wav2vec2-large-xlsr-53-english model is an Audio model used for Automatic Speech Recognition.

Model Description

Clone Model Repository

#Be sure to have git-lfs installed (https://git-lfs.com)
git lfs install
git clone https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-english
  
#To clone the repo without large files – just their pointers
#prepend git clone with the following env var:
GIT_LFS_SKIP_SMUDGE=1

#Be sure to have git-lfs installed (https://git-lfs.com)
git lfs install
git clone [email protected]:jonatasgrosman/wav2vec2-large-xlsr-53-english
  
#To clone the repo without large files – just their pointers
#prepend git clone with the following env var:
GIT_LFS_SKIP_SMUDGE=1

Hugging Face Transformers Library

from transformers import AutoProcessor, AutoModelForCTC

processor = AutoProcessor.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")

model = AutoModelForCTC.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")

Deployment

Inference API

import requests

API_URL = "https://api-inference.huggingface.co/models/jonatasgrosman/wav2vec2-large-xlsr-53-english"
headers = {"Authorization": f"Bearer {API_TOKEN}"}

def query(filename):
    with open(filename, "rb") as f:
        data = f.read()
    response = requests.post(API_URL, headers=headers, data=data)
    return response.json()

output = query("sample1.flac")

async function query(filename) {
	const data = fs.readFileSync(filename);
	const response = await fetch(
		"https://api-inference.huggingface.co/models/jonatasgrosman/wav2vec2-large-xlsr-53-english",
		{
			headers: { Authorization: "Bearer {API_TOKEN}" },
			method: "POST",
			body: data,
		}
	);
	const result = await response.json();
	return result;
}

query("sample1.flac").then((response) => {
	console.log(JSON.stringify(response));
});

curl https://api-inference.huggingface.co/models/jonatasgrosman/wav2vec2-large-xlsr-53-english \
	-X POST \
	--data-binary '@sample1.flac' \
	-H "Authorization: Bearer {API_TOKEN}"

Amazon SageMaker

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'conversational'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'conversational'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'feature-extraction'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'feature-extraction'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'fill-mask'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'fill-mask'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'image-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'image-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'question-answering'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'question-answering'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'summarization'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'summarization'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'table-question-answering'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'table-question-answering'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'text-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'text-classification'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'HF_TASK':'text-generation'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role, 
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

# Deploy jonatasgrosman/wav2vec2-large-xlsr-53-english to a SageMaker
# real-time endpoint, resolving the execution role via IAM.
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	# Fix: wav2vec2 is a speech-to-text model; 'text-generation'
	# is the wrong pipeline task for this checkpoint.
	'HF_TASK':'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

# NOTE(review): ASR endpoints expect audio data; confirm the filename is
# resolvable server-side or send raw audio bytes with a DataSerializer.
predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

# Deploy jonatasgrosman/wav2vec2-large-xlsr-53-english to a SageMaker
# real-time endpoint using the notebook's execution role.
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	# Fix: wav2vec2 is a speech-to-text model; 'text2text-generation'
	# is the wrong pipeline task for this checkpoint.
	'HF_TASK':'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

# NOTE(review): ASR endpoints expect audio data; confirm the filename is
# resolvable server-side or send raw audio bytes with a DataSerializer.
predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

# Deploy jonatasgrosman/wav2vec2-large-xlsr-53-english to a SageMaker
# real-time endpoint, resolving the execution role via IAM.
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	# Fix: wav2vec2 is a speech-to-text model; 'text2text-generation'
	# is the wrong pipeline task for this checkpoint.
	'HF_TASK':'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

# NOTE(review): ASR endpoints expect audio data; confirm the filename is
# resolvable server-side or send raw audio bytes with a DataSerializer.
predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

# Deploy jonatasgrosman/wav2vec2-large-xlsr-53-english to a SageMaker
# real-time endpoint using the notebook's execution role.
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	# Fix: wav2vec2 is a speech-to-text model; 'token-classification'
	# is the wrong pipeline task for this checkpoint.
	'HF_TASK':'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

# NOTE(review): ASR endpoints expect audio data; confirm the filename is
# resolvable server-side or send raw audio bytes with a DataSerializer.
predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

# Deploy jonatasgrosman/wav2vec2-large-xlsr-53-english to a SageMaker
# real-time endpoint, resolving the execution role via IAM.
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	# Fix: wav2vec2 is a speech-to-text model; 'token-classification'
	# is the wrong pipeline task for this checkpoint.
	'HF_TASK':'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

# NOTE(review): ASR endpoints expect audio data; confirm the filename is
# resolvable server-side or send raw audio bytes with a DataSerializer.
predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

# Deploy jonatasgrosman/wav2vec2-large-xlsr-53-english to a SageMaker
# real-time endpoint using the notebook's execution role.
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	# Fix: wav2vec2 is a speech-to-text model; 'translation'
	# is the wrong pipeline task for this checkpoint.
	'HF_TASK':'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

# NOTE(review): ASR endpoints expect audio data; confirm the filename is
# resolvable server-side or send raw audio bytes with a DataSerializer.
predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

# Deploy jonatasgrosman/wav2vec2-large-xlsr-53-english to a SageMaker
# real-time endpoint, resolving the execution role via IAM.
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	# Fix: wav2vec2 is a speech-to-text model; 'translation'
	# is the wrong pipeline task for this checkpoint.
	'HF_TASK':'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

# NOTE(review): ASR endpoints expect audio data; confirm the filename is
# resolvable server-side or send raw audio bytes with a DataSerializer.
predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import sagemaker

# Deploy jonatasgrosman/wav2vec2-large-xlsr-53-english to a SageMaker
# real-time endpoint using the notebook's execution role.
role = sagemaker.get_execution_role()
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	# Fix: wav2vec2 is a speech-to-text model; 'zero-shot-classification'
	# is the wrong pipeline task for this checkpoint.
	'HF_TASK':'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

# NOTE(review): ASR endpoints expect audio data; confirm the filename is
# resolvable server-side or send raw audio bytes with a DataSerializer.
predictor.predict({
	'inputs': "sample1.flac"
})

from sagemaker.huggingface import HuggingFaceModel
import boto3

# Deploy jonatasgrosman/wav2vec2-large-xlsr-53-english to a SageMaker
# real-time endpoint, resolving the execution role via IAM.
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
# Hub Model configuration. https://huggingface.co/models
hub = {
	'HF_MODEL_ID':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	# Fix: wav2vec2 is a speech-to-text model; 'zero-shot-classification'
	# is the wrong pipeline task for this checkpoint.
	'HF_TASK':'automatic-speech-recognition'
}

# create Hugging Face Model Class
huggingface_model = HuggingFaceModel(
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	env=hub,
	role=role,
)

# deploy model to SageMaker Inference
predictor = huggingface_model.deploy(
	initial_instance_count=1, # number of instances
	instance_type='ml.m5.xlarge' # ec2 instance type
)

# NOTE(review): ASR endpoints expect audio data; confirm the filename is
# resolvable server-side or send raw audio bytes with a DataSerializer.
predictor.predict({
	'inputs': "sample1.flac"
})

Spaces

import gradio as gr

# Build a Gradio demo backed by the hosted Hugging Face model, then serve it.
demo = gr.Interface.load("models/jonatasgrosman/wav2vec2-large-xlsr-53-english")
demo.launch()

Training

Amazon SageMaker

import sagemaker
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not the causal-LM one (run_clm.py).
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not the causal-LM one (run_clm.py).
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not the masked-LM one (run_mlm.py).
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not the masked-LM one (run_mlm.py).
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not the QA one (run_qa.py).
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not the QA one (run_qa.py).
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not run_summarization.py.
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not run_summarization.py.
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not the GLUE one (run_glue.py).
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not the GLUE one (run_glue.py).
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not the NER one (run_ner.py).
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not the NER one (run_ner.py).
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
role = sagemaker.get_execution_role()
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not run_translation.py.
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Fine-tune jonatasgrosman/wav2vec2-large-xlsr-53-english on SageMaker.
# gets role for executing training job
iam_client = boto3.client('iam')
role = iam_client.get_role(RoleName='{IAM_ROLE_WITH_SAGEMAKER_PERMISSIONS}')['Role']['Arn']
hyperparameters = {
	'model_name_or_path':'jonatasgrosman/wav2vec2-large-xlsr-53-english',
	'output_dir':'/opt/ml/model'
	# add your remaining hyperparameters
	# more info here https://github.com/huggingface/transformers/tree/v4.17.0/examples/pytorch/speech-recognition
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git','branch': 'v4.17.0'}

# creates Hugging Face estimator
# Fix: this is a wav2vec2 CTC speech-recognition checkpoint, so use the
# speech-recognition example script, not run_translation.py.
huggingface_estimator = HuggingFace(
	entry_point='run_speech_recognition_ctc.py',
	source_dir='./examples/pytorch/speech-recognition',
	instance_type='ml.p3.2xlarge',
	instance_count=1,
	role=role,
	git_config=git_config,
	transformers_version='4.17.0',
	pytorch_version='1.10.2',
	py_version='py38',
	hyperparameters = hyperparameters
)

# starting the train job
huggingface_estimator.fit()

Model Card

Comments

Loading comments...