from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
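
# Usage sketch (illustrative; assumes JAX/Flax is installed): the auto classes
# above resolve a checkpoint's config to the matching Flax model class, e.g.
# "bert-base-cased" resolves to FlaxBertModel via FLAX_MODEL_MAPPING_NAMES:
#
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")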
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return a torch activation module for the given activation name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
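
# Usage sketch: the factory above maps an activation name from a config to a
# torch.nn module; "swish" and "silu" are aliases, so both calls below return
# an nn.SiLU() instance:
#
#   act = get_activation("silu")
#   act = get_activation("swish")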
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F'''upernet-convnext-{size}''' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
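
# Example invocation (sketch; the script filename is an assumption):
#
#   python convert_upernet_convnext_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny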
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help="Name of the MaskFormer model you'd like to convert.",
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
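
# Example invocation (sketch; the script filename is an assumption):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade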
from ..utils import DummyObject, requires_backends


class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
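
# Usage note (sketch): these dummy classes stand in for the real Flax
# implementations when JAX/Flax is not installed. Touching any of them raises
# an informative ImportError via `requires_backends`, e.g.:
#
#   FlaxUNet2DConditionModel()  # -> ImportError asking you to install flax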
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""Construct a CANINE tokenizer (i.e. a character splitter)."""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. perform character splitting)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a Unicode character) into an id (i.e. its integer codepoint value)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Converts a Unicode codepoint (integer) into a token (str), using a human-readable form for special codepoints."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE works directly on raw characters, so there is no vocab file to save
        return ()
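
# Usage sketch: CANINE tokenizes at the character level, so input ids are raw
# Unicode codepoints wrapped in the [CLS]/[SEP] pseudo-characters defined above:
#
#   tokenizer = CanineTokenizer()
#   tokenizer("hello")["input_ids"]
#   # -> [57344, 104, 101, 108, 108, 111, 57345]  (0xE000, ord("h"), ..., 0xE001)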
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class UpperCamelCase_ ( UpperCamelCase__ ):
def _snake_case ( self :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def _snake_case ( self :List[Any] ) -> Tuple:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = f'''{i + 1}'''
SCREAMING_SNAKE_CASE__ = strategy
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def _snake_case ( self :int ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = prefetch_policy
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _snake_case ( self :List[str] ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = state_dict_type
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(__A )
for policy in FSDP_AUTO_WRAP_POLICY:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = policy
if policy == "TRANSFORMER_BASED_WRAP":
SCREAMING_SNAKE_CASE__ = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
SCREAMING_SNAKE_CASE__ = """2000"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__A )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = """TRANSFORMER_BASED_WRAP"""
SCREAMING_SNAKE_CASE__ = """T5Layer"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
with self.assertRaises(__A ) as cm:
fsdp_plugin.set_auto_wrap_policy(__A )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = """SIZE_BASED_WRAP"""
SCREAMING_SNAKE_CASE__ = """0"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__A )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _snake_case ( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = mp_dtype
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = Accelerator()
if mp_dtype == "fp16":
SCREAMING_SNAKE_CASE__ = torch.floataa
elif mp_dtype == "bf16":
SCREAMING_SNAKE_CASE__ = torch.bfloataa
SCREAMING_SNAKE_CASE__ = MixedPrecision(param_dtype=__A , reduce_dtype=__A , buffer_dtype=__A )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __A )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , __A ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__A )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = str(__A ).lower()
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) )
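
# Editorial note (not part of the original test file): every case above configures
# FullyShardedDataParallelPlugin purely through environment variables. A minimal
# manual equivalent of the first sharding-strategy case would be:
#
#     with mockenv_context(ACCELERATE_USE_FSDP="true", FSDP_SHARDING_STRATEGY="1"):
#         plugin = FullyShardedDataParallelPlugin()
#         # plugin.sharding_strategy == ShardingStrategy.FULL_SHARD (enum value 1)
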
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
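
    # For reference (editorial, assembled from the flags above and assuming
    # "FULL_SHARD" is first in FSDP_SHARDING_STRATEGY): the
    # "fsdp_full_shard_transformer_based_wrap" config expands to roughly
    #   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 --use_fsdp
    #     --fsdp_sharding_strategy=1 --mixed_precision=fp16
    #     --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP
    #     --fsdp_transformer_layer_cls_to_wrap=BertLayer
    #     test_performance.py --output_dir=<tmpdir> --performance_lower_bound=0.82
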
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
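
# Example invocation (editorial; the script file name and paths are placeholders):
#   python rename_unet_configs_and_weights.py --repo_path /path/to/model_repo --dump_path /path/to/out
# Note that --dump_path is required by the parser but unused in this version of the
# script: outputs are written back under --repo_path/<subfolder>.
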
import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
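
# Shape note (editorial): for a (batch, channels, height, width) input, `shape` above is
# (batch, 1, 1, 1), so each sample's residual branch is either zeroed out entirely or
# kept and rescaled by 1/keep_prob; stochastic depth acts per sample, not per element.
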
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with an optional normalization layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group; input is a tensor of shape [B, C, H, W]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
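
# In equation form (editorial summary of the forward pass above with layer scale
# enabled; s1 and s2 are the learnable per-channel scales):
#   x <- x + DropPath(s1 * (AvgPool(Norm1(x)) - Norm1(x)))   # pooling token mixer
#   y <- x + DropPath(s2 * MLP(Norm2(x)))                    # channel MLP
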
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
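
# Worked example (editorial; both functions rearrange the ideal gas law PV = nRT):
# for n = 1 mol at T = 300 K in V = 0.025 m^3,
#   pressure_of_gas_system(1, 300, 0.025) = 1 * 300 * 8.314462 / 0.025 ~ 99773.5 Pa,
# which is close to one standard atmosphere (101325 Pa).
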
if __name__ == "__main__":
    from doctest import testmod

    testmod()
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, specifically avoiding
    whitespace/control characters that the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return the set of symbol pairs in a word, where a word is represented as a tuple
    of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
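
# Illustration (editorial, not part of the original file):
#   get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
# These are the candidate bigrams that the BPE loop below ranks against `bpe_ranks`
# to pick the next merge.
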
class BlenderbotTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot tokenizer using byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
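
    # Editorial walkthrough of one `bpe` call: for token "hello" with a learned merge
    # ("l", "l"), the word ("h", "e", "l", "l", "o") becomes ("h", "e", "ll", "o") on
    # the first iteration; the loop repeats until no remaining pair appears in
    # `bpe_ranks`, and the space-joined result is cached per token.
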
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
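
    # Editorial note: unlike the RoBERTa tokenizer this class borrows its BPE machinery
    # from, Blenderbot only appends `</s>` (eos) and never prepends `<s>`, so a single
    # sequence encodes as `token_ids_0 + [eos_token_id]`.
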
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
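
# Editorial sketch of the pattern exercised by the tests below (assuming an
# Accelerator built with gradient_accumulation_steps=2):
#
#     with accelerator.accumulate(ddp_model):
#         step_model(ddp_model, ddp_input, ddp_target, accelerator)
#
# Inside accumulate(), accelerator.backward() skips the DDP gradient all-reduce on
# accumulation steps and syncs on every second step (and on the final batch), which
# the asserts below verify parameter by parameter.
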
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ):
# Test when on a single CPU or GPU that the context manager does nothing
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
# Use a single batch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
# Sync grads
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ):
# Test on distributed setup that context manager behaves properly
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
# Use a single batch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
# Sync grads
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=False , UpperCamelCase__: Union[str, Any]=False ):
SCREAMING_SNAKE_CASE__ = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values()
        # Gather the distributed inputs and targets for the base model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground-truth step without "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Grads should only be in sync at accumulation boundaries or at the end of the dataloader
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
GradientState._reset_state()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=False ):
SCREAMING_SNAKE_CASE__ = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values()
        # Gather the distributed inputs and targets for the base model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground-truth step without "DDP"
model.train()
ddp_model.train()
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
opt.step()
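        # Mirror the prepared scheduler: it advances once per process for every
        # optimizer step, so step it `num_processes` times unless batches are split.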
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ ))
if accelerator.num_processes > 1:
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = Accelerator()
SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 )
SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 )
SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if iteration < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
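                # Enter a nested dataloader; the gradient state should now track the inner one.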
for batch_num, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if batch_num < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = Accelerator()
SCREAMING_SNAKE_CASE__ = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(UpperCamelCase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(UpperCamelCase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ )
    # Currently breaks on torch 2.0+; need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 6 | 1 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] ):
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = FlaxAutoModelForSeqaSeqLM.from_config(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = checkpoints.load_tax_checkpoint(UpperCamelCase__ )
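    # T5 v1.1-style checkpoints split the MLP input projection into wi_0/wi_1 (gated activation); detect that here.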
SCREAMING_SNAKE_CASE__ = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
SCREAMING_SNAKE_CASE__ = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
SCREAMING_SNAKE_CASE__ = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE__ = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
SCREAMING_SNAKE_CASE__ = f'''layers_{str(UpperCamelCase__ )}'''
# Self-Attention
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
SCREAMING_SNAKE_CASE__ = flax_model.params["""encoder"""]["""block"""][str(UpperCamelCase__ )]["""layer"""]
SCREAMING_SNAKE_CASE__ = tax_attention_key
SCREAMING_SNAKE_CASE__ = tax_attention_out
SCREAMING_SNAKE_CASE__ = tax_attention_query
SCREAMING_SNAKE_CASE__ = tax_attention_value
SCREAMING_SNAKE_CASE__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE__ = tax_global_layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = tax_mlp_wi_a
SCREAMING_SNAKE_CASE__ = tax_mlp_wi_a
else:
SCREAMING_SNAKE_CASE__ = tax_mlp_wi
SCREAMING_SNAKE_CASE__ = tax_mlp_wo
SCREAMING_SNAKE_CASE__ = tax_mlp_layer_norm
SCREAMING_SNAKE_CASE__ = flax_model_encoder_layer_block
# Only for layer 0:
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
SCREAMING_SNAKE_CASE__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
SCREAMING_SNAKE_CASE__ = tax_encoder_global_rel_embedding
# Assigning
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
SCREAMING_SNAKE_CASE__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
SCREAMING_SNAKE_CASE__ = f'''layers_{str(UpperCamelCase__ )}'''
# Self-Attention
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_module["""key"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_module["""out"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_module["""query"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
SCREAMING_SNAKE_CASE__ = flax_model.params["""decoder"""]["""block"""][str(UpperCamelCase__ )]["""layer"""]
SCREAMING_SNAKE_CASE__ = tax_attention_key
SCREAMING_SNAKE_CASE__ = tax_attention_out
SCREAMING_SNAKE_CASE__ = tax_attention_query
SCREAMING_SNAKE_CASE__ = tax_attention_value
SCREAMING_SNAKE_CASE__ = tax_pre_attention_layer_norm
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_key
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_out
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_query
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_value
SCREAMING_SNAKE_CASE__ = tax_cross_layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = tax_mlp_wi_a
SCREAMING_SNAKE_CASE__ = tax_mlp_wi_a
else:
SCREAMING_SNAKE_CASE__ = tax_mlp_wi
SCREAMING_SNAKE_CASE__ = tax_mlp_wo
        SCREAMING_SNAKE_CASE__ = tax_mlp_layer_norm
SCREAMING_SNAKE_CASE__ = flax_model_decoder_layer_block
# Decoder Normalization
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
    SCREAMING_SNAKE_CASE__ = tax_decoder_norm
# Only for layer 0:
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
SCREAMING_SNAKE_CASE__ = tax_decoder_rel_embedding
# Token Embeddings
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
    SCREAMING_SNAKE_CASE__ = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
SCREAMING_SNAKE_CASE__ = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(UpperCamelCase__ )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_lowerCamelCase = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path) | 6 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "AutoImageProcessor"
lowerCamelCase_ = "AutoTokenizer"
def __init__( self :Optional[int] , __A :Optional[Any] , __A :Dict ) -> Dict:
"""simple docstring"""
super().__init__(__A , __A )
SCREAMING_SNAKE_CASE__ = self.image_processor
def __call__( self :int , __A :str=None , __A :int=None , __A :Union[str, Any]=None , **__A :str ) -> Optional[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def _snake_case ( self :str , *__A :List[str] , **__A :List[str] ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :List[str] , *__A :Any , **__A :Any ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self :Dict ) -> List[Any]:
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"] | 6 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE__ = subparsers.add_parser("""test""" )
else:
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" , default=UpperCamelCase__ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ):
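    # Resolve the smoke-test script bundled with the package (test_utils/scripts/test_script.py).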
SCREAMING_SNAKE_CASE__ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
if args.config_file is None:
SCREAMING_SNAKE_CASE__ = script_name
else:
SCREAMING_SNAKE_CASE__ = f'''--config_file={args.config_file} {script_name}'''
SCREAMING_SNAKE_CASE__ = ["""accelerate-launch"""] + test_args.split()
SCREAMING_SNAKE_CASE__ = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("""Test is a success! You are ready for your distributed training!""" )
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = test_command_parser()
SCREAMING_SNAKE_CASE__ = parser.parse_args()
test_command(UpperCamelCase__ )
if __name__ == "__main__":
main() | 6 |
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
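    # Classic partition problem: dp[i][j] is True when some subset of the first i
    # elements sums to j; the answer is s - 2*j for the largest feasible j <= s // 2
    # (e.g. [1, 6, 11, 5] -> 1, splitting into {1, 5, 6} and {11}).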
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = sum(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE__ = True
for i in range(1 , s + 1 ):
SCREAMING_SNAKE_CASE__ = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
SCREAMING_SNAKE_CASE__ = dp[i][j - 1]
if arr[i - 1] <= j:
SCREAMING_SNAKE_CASE__ = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
SCREAMING_SNAKE_CASE__ = s - 2 * j
break
return diff | 6 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowerCamelCase = 'src/diffusers'
_lowerCamelCase = '.'
# This is to make sure the diffusers module imported is the one in the repo.
_lowerCamelCase = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowerCamelCase = spec.loader.load_module()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[int] ):
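    # A line still belongs to the current block if it is indented past `indent`,
    # (nearly) empty, or closes a multi-line signature such as `) -> ...:`.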
return line.startswith(UpperCamelCase__ ) or len(UpperCamelCase__ ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , UpperCamelCase__ ) is not None
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = object_name.split(""".""" )
SCREAMING_SNAKE_CASE__ = 0
# First let's find the module where our object lives.
SCREAMING_SNAKE_CASE__ = parts[i]
while i < len(UpperCamelCase__ ) and not os.path.isfile(os.path.join(UpperCamelCase__ , f'''{module}.py''' ) ):
i += 1
if i < len(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , parts[i] )
if i >= len(UpperCamelCase__ ):
raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(UpperCamelCase__ , f'''{module}.py''' ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
SCREAMING_SNAKE_CASE__ = f.readlines()
# Now let's find the class / func in the code!
SCREAMING_SNAKE_CASE__ = """"""
SCREAMING_SNAKE_CASE__ = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCamelCase__ ) and re.search(Rf'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCamelCase__ ):
raise ValueError(f''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
SCREAMING_SNAKE_CASE__ = line_index
while line_index < len(UpperCamelCase__ ) and _should_continue(lines[line_index] , UpperCamelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE__ = lines[start_index:line_index]
return "".join(UpperCamelCase__ )
_lowerCamelCase = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_lowerCamelCase = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_lowerCamelCase = re.compile(R'<FILL\s+[^>]*>')
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict ):
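    # Return the leading whitespace of the first non-empty line of `code`.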
SCREAMING_SNAKE_CASE__ = code.split("""\n""" )
SCREAMING_SNAKE_CASE__ = 0
while idx < len(UpperCamelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCamelCase__ ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ):
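    # Wrap indented snippets in a dummy class so black can parse them, then strip the wrapper afterwards.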
SCREAMING_SNAKE_CASE__ = len(get_indent(UpperCamelCase__ ) ) > 0
if has_indent:
SCREAMING_SNAKE_CASE__ = f'''class Bla:\n{code}'''
SCREAMING_SNAKE_CASE__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = black.format_str(UpperCamelCase__ , mode=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = style_docstrings_in_code(UpperCamelCase__ )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: str=False ):
with open(UpperCamelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
SCREAMING_SNAKE_CASE__ = f.readlines()
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = search.groups()
SCREAMING_SNAKE_CASE__ = find_code_in_diffusers(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = get_indent(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = line_index + 1 if indent == theoretical_indent else line_index + 2
SCREAMING_SNAKE_CASE__ = theoretical_indent
SCREAMING_SNAKE_CASE__ = start_index
        # Loop to check the observed code; stop when indentation diminishes or when we hit an `# End copy` comment.
SCREAMING_SNAKE_CASE__ = True
while line_index < len(UpperCamelCase__ ) and should_continue:
line_index += 1
if line_index >= len(UpperCamelCase__ ):
break
SCREAMING_SNAKE_CASE__ = lines[line_index]
SCREAMING_SNAKE_CASE__ = _should_continue(UpperCamelCase__ , UpperCamelCase__ ) and re.search(f'''^{indent}# End copy''' , UpperCamelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE__ = lines[start_index:line_index]
SCREAMING_SNAKE_CASE__ = """""".join(UpperCamelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
SCREAMING_SNAKE_CASE__ = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCamelCase__ ) is None]
SCREAMING_SNAKE_CASE__ = """\n""".join(UpperCamelCase__ )
        # Before comparing, apply the `with X->Y` replacement patterns to the reference code.
if len(UpperCamelCase__ ) > 0:
SCREAMING_SNAKE_CASE__ = replace_pattern.replace("""with""" , """""" ).split(""",""" )
SCREAMING_SNAKE_CASE__ = [_re_replace_pattern.search(UpperCamelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pattern.groups()
SCREAMING_SNAKE_CASE__ = re.sub(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if option.strip() == "all-casing":
SCREAMING_SNAKE_CASE__ = re.sub(obja.lower() , obja.lower() , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = re.sub(obja.upper() , obja.upper() , UpperCamelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
SCREAMING_SNAKE_CASE__ = blackify(lines[start_index - 1] + theoretical_code )
SCREAMING_SNAKE_CASE__ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
SCREAMING_SNAKE_CASE__ = lines[:start_index] + [theoretical_code] + lines[line_index:]
SCREAMING_SNAKE_CASE__ = start_index + 1
if overwrite and len(UpperCamelCase__ ) > 0:
# Warn the user a file has been modified.
print(f'''Detected changes, rewriting {filename}.''' )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCamelCase__ )
return diffs
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: bool = False ):
SCREAMING_SNAKE_CASE__ = glob.glob(os.path.join(UpperCamelCase__ , """**/*.py""" ) , recursive=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = []
for filename in all_files:
SCREAMING_SNAKE_CASE__ = is_copy_consistent(UpperCamelCase__ , UpperCamelCase__ )
diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(UpperCamelCase__ ) > 0:
SCREAMING_SNAKE_CASE__ = """\n""".join(UpperCamelCase__ )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowerCamelCase = parser.parse_args()
check_copies(args.fix_and_overwrite) | 6 |
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float ):
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(UpperCamelCase__ ) * abs(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 6 | 1 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
_lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase = 256
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["melgan"]
def __init__( self :List[Any] , __A :SpectrogramNotesEncoder , __A :SpectrogramContEncoder , __A :TaFilmDecoder , __A :DDPMScheduler , __A :OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
SCREAMING_SNAKE_CASE__ = math.log(1E-5 ) # Matches MelGAN training.
SCREAMING_SNAKE_CASE__ = 4.0 # Largest value for most examples
SCREAMING_SNAKE_CASE__ = 128
self.register_modules(
notes_encoder=__A , continuous_encoder=__A , decoder=__A , scheduler=__A , melgan=__A , )
def _snake_case ( self :str , __A :List[Any] , __A :Optional[int]=(-1.0, 1.0) , __A :Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output_range
if clip:
SCREAMING_SNAKE_CASE__ = torch.clip(__A , self.min_value , self.max_value )
# Scale to [0, 1].
SCREAMING_SNAKE_CASE__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _snake_case ( self :Dict , __A :Tuple , __A :str=(-1.0, 1.0) , __A :List[str]=False ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input_range
SCREAMING_SNAKE_CASE__ = torch.clip(__A , __A , __A ) if clip else outputs
# Scale to [0, 1].
SCREAMING_SNAKE_CASE__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _snake_case ( self :Union[str, Any] , __A :Any , __A :List[Any] , __A :str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = input_tokens > 0
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.notes_encoder(
encoder_input_tokens=__A , encoder_inputs_mask=__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.continuous_encoder(
encoder_inputs=__A , encoder_inputs_mask=__A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _snake_case ( self :Any , __A :int , __A :str , __A :Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = noise_time
if not torch.is_tensor(__A ):
SCREAMING_SNAKE_CASE__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(__A ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
SCREAMING_SNAKE_CASE__ = self.decoder(
encodings_and_masks=__A , decoder_input_tokens=__A , decoder_noise_time=__A )
return logits
@torch.no_grad()
def __call__( self :Dict , __A :List[List[int]] , __A :Optional[torch.Generator] = None , __A :int = 100 , __A :bool = True , __A :str = "numpy" , __A :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A :int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__A )}.''' )
SCREAMING_SNAKE_CASE__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ = np.zeros([1, 0, self.n_dims] , np.floataa )
SCREAMING_SNAKE_CASE__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__A , device=self.device )
for i, encoder_input_tokens in enumerate(__A ):
if i == 0:
SCREAMING_SNAKE_CASE__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
SCREAMING_SNAKE_CASE__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
SCREAMING_SNAKE_CASE__ = ones
SCREAMING_SNAKE_CASE__ = self.scale_features(
__A , output_range=[-1.0, 1.0] , clip=__A )
SCREAMING_SNAKE_CASE__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__A , continuous_mask=__A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
SCREAMING_SNAKE_CASE__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE__ = self.decode(
encodings_and_masks=__A , input_tokens=__A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(__A , __A , __A , generator=__A ).prev_sample
SCREAMING_SNAKE_CASE__ = self.scale_to_features(__A , input_range=[-1.0, 1.0] )
SCREAMING_SNAKE_CASE__ = mel[:1]
SCREAMING_SNAKE_CASE__ = mel.cpu().float().numpy()
SCREAMING_SNAKE_CASE__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A , __A )
logger.info("""Generated segment""" , __A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
SCREAMING_SNAKE_CASE__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
SCREAMING_SNAKE_CASE__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__A ) | 6 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "encoder-decoder"
lowerCamelCase_ = True
def __init__( self :Optional[int] , **__A :str ) -> int:
"""simple docstring"""
super().__init__(**__A )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
SCREAMING_SNAKE_CASE__ = kwargs.pop("""encoder""" )
SCREAMING_SNAKE_CASE__ = encoder_config.pop("""model_type""" )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""decoder""" )
SCREAMING_SNAKE_CASE__ = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A )
SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A )
SCREAMING_SNAKE_CASE__ = True
@classmethod
def _snake_case ( cls :str , __A :PretrainedConfig , __A :PretrainedConfig , **__A :List[str] ) -> PretrainedConfig:
"""simple docstring"""
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__A )
def _snake_case ( self :str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ = self.encoder.to_dict()
SCREAMING_SNAKE_CASE__ = self.decoder.to_dict()
SCREAMING_SNAKE_CASE__ = self.__class__.model_type
return output | 6 | 1 |
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = f'''Input value of [number={number}] must be an integer'''
raise TypeError(UpperCamelCase__ )
if number < 0:
return False
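    # Automorphic number check: n is automorphic when n**2 ends with the digits of n (e.g. 76**2 = 5776).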
SCREAMING_SNAKE_CASE__ = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase__ )
class UpperCamelCase_ ( UpperCamelCase__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCamelCase_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowerCamelCase_ = Features({"text": Value("string" )} )
lowerCamelCase_ = Features({"labels": ClassLabel} )
lowerCamelCase_ = "text"
lowerCamelCase_ = "labels"
def _snake_case ( self :Any , __A :Dict ) -> Optional[Any]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self )
SCREAMING_SNAKE_CASE__ = self.label_schema.copy()
SCREAMING_SNAKE_CASE__ = features[self.label_column]
SCREAMING_SNAKE_CASE__ = label_schema
return task_template
@property
def _snake_case ( self :str ) -> Dict[str, str]:
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
} | 6 | 1 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _snake_case ( self :Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
SCREAMING_SNAKE_CASE__ = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
model.to(__A )
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ = load_dataset("""nielsr/rvlcdip-demo""" )
SCREAMING_SNAKE_CASE__ = dataset["""train"""][0]["""image"""].convert("""RGB""" )
SCREAMING_SNAKE_CASE__ = image_processor(__A , return_tensors="""pt""" ).to(__A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__A )
SCREAMING_SNAKE_CASE__ = outputs.logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 16) )
self.assertEqual(logits.shape , __A )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=__A , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , __A , atol=1E-4 ) ) | 6 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = model.config
SCREAMING_SNAKE_CASE__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
SCREAMING_SNAKE_CASE__ = MBartConfig(
is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , add_cross_attention=UpperCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=UpperCamelCase__ , add_final_layer_norm=UpperCamelCase__ , )
return encoder_config, decoder_config
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
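    # Map original Donut parameter names onto the HuggingFace naming scheme.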
if "encoder.model" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
SCREAMING_SNAKE_CASE__ = """encoder.""" + name
if "attn.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
SCREAMING_SNAKE_CASE__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
SCREAMING_SNAKE_CASE__ = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
SCREAMING_SNAKE_CASE__ = """encoder.layernorm.bias"""
return name
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
SCREAMING_SNAKE_CASE__ = key.split(""".""" )
SCREAMING_SNAKE_CASE__ = int(key_split[3] )
SCREAMING_SNAKE_CASE__ = int(key_split[5] )
SCREAMING_SNAKE_CASE__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE__ = val[:dim, :]
SCREAMING_SNAKE_CASE__ = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE__ = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE__ = val[:dim]
SCREAMING_SNAKE_CASE__ = val[dim : dim * 2]
SCREAMING_SNAKE_CASE__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
SCREAMING_SNAKE_CASE__ = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int=None , UpperCamelCase__: str=False ):
# load original model
SCREAMING_SNAKE_CASE__ = DonutModel.from_pretrained(UpperCamelCase__ ).eval()
# load HuggingFace model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_configs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = DonutSwinModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = MBartForCausalLM(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = original_model.state_dict()
SCREAMING_SNAKE_CASE__ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# verify results on scanned document
SCREAMING_SNAKE_CASE__ = load_dataset("""hf-internal-testing/example-documents""" )
SCREAMING_SNAKE_CASE__ = dataset["""test"""][0]["""image"""].convert("""RGB""" )
SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase__ , from_slow=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
SCREAMING_SNAKE_CASE__ = DonutProcessor(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
SCREAMING_SNAKE_CASE__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
SCREAMING_SNAKE_CASE__ = """When is the coffee break?"""
SCREAMING_SNAKE_CASE__ = task_prompt.replace("""{user_input}""" , UpperCamelCase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
SCREAMING_SNAKE_CASE__ = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
SCREAMING_SNAKE_CASE__ = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
SCREAMING_SNAKE_CASE__ = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
SCREAMING_SNAKE_CASE__ = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
SCREAMING_SNAKE_CASE__ = """hello world"""
else:
raise ValueError("""Model name not supported""" )
SCREAMING_SNAKE_CASE__ = original_model.decoder.tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="""pt""" )[
"""input_ids"""
]
SCREAMING_SNAKE_CASE__ = original_model.encoder.model.patch_embed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.encoder.embeddings(UpperCamelCase__ )
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
# verify encoder hidden states
SCREAMING_SNAKE_CASE__ = original_model.encoder(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = model.encoder(UpperCamelCase__ ).last_hidden_state
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
# verify decoder hidden states
SCREAMING_SNAKE_CASE__ = original_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).logits
SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
_lowerCamelCase = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 6 | 1 |
_lowerCamelCase = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
_lowerCamelCase = ['a', 'b', 'c', 'd', 'e']
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = start
# add current to visited
visited.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
SCREAMING_SNAKE_CASE__ = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# if all neighbors visited add current to sort
sort.append(UpperCamelCase__ )
# if all vertices haven't been visited select a new one to visit
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
for vertice in vertices:
if vertice not in visited:
SCREAMING_SNAKE_CASE__ = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# return sort
return sort
if __name__ == "__main__":
_lowerCamelCase = topological_sort('a', [], [])
print(sort) | 6 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_euler""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self :str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_euler""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , generator=__A , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=__A , )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 6 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 |
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int = 600_851_475_143 ):
try:
SCREAMING_SNAKE_CASE__ = int(UpperCamelCase__ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
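    # Trial division: repeatedly strip each factor i; the last factor stripped
    # (or the remaining n, if it exceeds 1) is the largest prime factor.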
while i * i <= n:
while n % i == 0:
SCREAMING_SNAKE_CASE__ = i
n //= i
i += 1
if n > 1:
SCREAMING_SNAKE_CASE__ = n
return int(UpperCamelCase__ )
if __name__ == "__main__":
print(F'''{solution() = }''') | 6 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :List[str] , **__A :Optional[int] ) -> Tuple:
"""simple docstring"""
super().__init__(**__A )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self :Optional[Any] , __A :Union[str, List[str], "Image", List["Image"]] , **__A :Any ) -> Tuple:
"""simple docstring"""
return super().__call__(__A , **__A )
def _snake_case ( self :Tuple , **__A :List[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {}
if "candidate_labels" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
SCREAMING_SNAKE_CASE__ = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _snake_case ( self :List[Any] , __A :Optional[Any] , __A :str=None , __A :Dict="This is a photo of {}." ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_image(__A )
SCREAMING_SNAKE_CASE__ = self.image_processor(images=[image] , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ = candidate_labels
        SCREAMING_SNAKE_CASE__ = [hypothesis_template.format(x ) for x in candidate_labels]
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , return_tensors=self.framework , padding=__A )
SCREAMING_SNAKE_CASE__ = [text_inputs]
return inputs
def _snake_case ( self :str , __A :Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = model_inputs.pop("""candidate_labels""" )
SCREAMING_SNAKE_CASE__ = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , __A ):
SCREAMING_SNAKE_CASE__ = text_inputs[0]
else:
# Batching case.
SCREAMING_SNAKE_CASE__ = text_inputs[0][0]
SCREAMING_SNAKE_CASE__ = self.model(**__A , **__A )
SCREAMING_SNAKE_CASE__ = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def _snake_case ( self :Dict , __A :Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = model_outputs.pop("""candidate_labels""" )
SCREAMING_SNAKE_CASE__ = model_outputs["""logits"""][0]
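        # Convert the per-label logits into probabilities with a framework-appropriate softmax.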
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ = logits.softmax(dim=-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ = probs.tolist()
if not isinstance(__A , __A ):
SCREAMING_SNAKE_CASE__ = [scores]
elif self.framework == "tf":
SCREAMING_SNAKE_CASE__ = stable_softmax(__A , axis=-1 )
SCREAMING_SNAKE_CASE__ = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
SCREAMING_SNAKE_CASE__ = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(__A , __A ) , key=lambda __A : -__A[0] )
]
return result | 6 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__A ) )
def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(__A ) )
def _snake_case ( self :Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
            # Removed: 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertFalse(is_safetensors_compatible(__A , variant=__A ) ) | 6 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "efficientnet"
def __init__( self :Tuple , __A :int = 3 , __A :int = 600 , __A :float = 2.0 , __A :float = 3.1 , __A :int = 8 , __A :List[int] = [3, 3, 5, 3, 5, 5, 3] , __A :List[int] = [32, 16, 24, 40, 80, 112, 192] , __A :List[int] = [16, 24, 40, 80, 112, 192, 320] , __A :List[int] = [] , __A :List[int] = [1, 2, 2, 2, 1, 2, 1] , __A :List[int] = [1, 2, 2, 3, 3, 4, 1] , __A :List[int] = [1, 6, 6, 6, 6, 6, 6] , __A :float = 0.2_5 , __A :str = "swish" , __A :int = 2560 , __A :str = "mean" , __A :float = 0.0_2 , __A :float = 0.0_0_1 , __A :float = 0.9_9 , __A :float = 0.5 , __A :float = 0.2 , **__A :int , ) -> List[Any]:
"""simple docstring"""
super().__init__(**__A )
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = width_coefficient
SCREAMING_SNAKE_CASE__ = depth_coefficient
SCREAMING_SNAKE_CASE__ = depth_divisor
SCREAMING_SNAKE_CASE__ = kernel_sizes
SCREAMING_SNAKE_CASE__ = in_channels
SCREAMING_SNAKE_CASE__ = out_channels
SCREAMING_SNAKE_CASE__ = depthwise_padding
SCREAMING_SNAKE_CASE__ = strides
SCREAMING_SNAKE_CASE__ = num_block_repeats
SCREAMING_SNAKE_CASE__ = expand_ratios
SCREAMING_SNAKE_CASE__ = squeeze_expansion_ratio
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dim
SCREAMING_SNAKE_CASE__ = pooling_type
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = batch_norm_eps
SCREAMING_SNAKE_CASE__ = batch_norm_momentum
SCREAMING_SNAKE_CASE__ = dropout_rate
SCREAMING_SNAKE_CASE__ = drop_connect_rate
SCREAMING_SNAKE_CASE__ = sum(__A ) * 4
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = version.parse("1.11" )
@property
def _snake_case ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self :List[str] ) -> float:
"""simple docstring"""
return 1E-5 | 6 |
import argparse
import datetime
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = {
"""0""": """Sunday""",
"""1""": """Monday""",
"""2""": """Tuesday""",
"""3""": """Wednesday""",
"""4""": """Thursday""",
"""5""": """Friday""",
"""6""": """Saturday""",
}
SCREAMING_SNAKE_CASE__ = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(UpperCamelCase__ ) < 11:
raise ValueError("""Must be 10 characters long""" )
# Get month
SCREAMING_SNAKE_CASE__ = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("""Month must be between 1 - 12""" )
SCREAMING_SNAKE_CASE__ = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get day
SCREAMING_SNAKE_CASE__ = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("""Date must be between 1 - 31""" )
# Get second separator
SCREAMING_SNAKE_CASE__ = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get year
SCREAMING_SNAKE_CASE__ = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8_500:
raise ValueError(
"""Year out of range. There has to be some sort of limit...right?""" )
# Get datetime obj for validation
SCREAMING_SNAKE_CASE__ = datetime.date(int(UpperCamelCase__ ) , int(UpperCamelCase__ ) , int(UpperCamelCase__ ) )
# Start math
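    # Zeller's congruence counts January and February as months 13 and 14 of the previous year.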
if m <= 2:
SCREAMING_SNAKE_CASE__ = y - 1
SCREAMING_SNAKE_CASE__ = m + 12
# maths var
SCREAMING_SNAKE_CASE__ = int(str(UpperCamelCase__ )[:2] )
SCREAMING_SNAKE_CASE__ = int(str(UpperCamelCase__ )[2:] )
SCREAMING_SNAKE_CASE__ = int(2.6 * m - 5.3_9 )
SCREAMING_SNAKE_CASE__ = int(c / 4 )
SCREAMING_SNAKE_CASE__ = int(k / 4 )
SCREAMING_SNAKE_CASE__ = int(d + k )
SCREAMING_SNAKE_CASE__ = int(t + u + v + x )
SCREAMING_SNAKE_CASE__ = int(z - (2 * c) )
SCREAMING_SNAKE_CASE__ = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
# Response
SCREAMING_SNAKE_CASE__ = f'''Your date {date_input}, is a {days[str(UpperCamelCase__ )]}!'''
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
_lowerCamelCase = parser.parse_args()
zeller(args.date_input) | 6 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = StableDiffusionSAGPipeline
lowerCamelCase_ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase_ = False
def _snake_case ( self :List[Any] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(__A )
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _snake_case ( self :Optional[Any] , __A :List[str] , __A :Optional[int]=0 ) -> Any:
"""simple docstring"""
if str(__A ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__A )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__A ).manual_seed(__A )
SCREAMING_SNAKE_CASE__ = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def _snake_case ( self :int ) -> int:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self :Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
SCREAMING_SNAKE_CASE__ = sag_pipe.to(__A )
sag_pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = """."""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sag_pipe(
[prompt] , generator=__A , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def _snake_case ( self :Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE__ = sag_pipe.to(__A )
sag_pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = """."""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sag_pipe(
[prompt] , generator=__A , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def _snake_case ( self :List[str] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE__ = sag_pipe.to(__A )
sag_pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = """."""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sag_pipe(
[prompt] , width=768 , height=512 , generator=__A , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images
assert image.shape == (1, 512, 768, 3) | 6 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_lowerCamelCase = logging.getLogger(__name__)
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30522, type=int)
_lowerCamelCase = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, 'rb') as fp:
_lowerCamelCase = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
_lowerCamelCase = Counter()
for tk_ids in data:
counter.update(tk_ids)
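    # Build a dense count vector indexed by token id; tokens never seen keep a count of 0.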
_lowerCamelCase = [0] * args.vocab_size
for k, v in counter.items():
_lowerCamelCase = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 6 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class UpperCamelCase_ :
def __init__( self :Any , __A :int ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = value
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
class UpperCamelCase_ :
def __init__( self :Dict , __A :Node ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = tree
def _snake_case ( self :List[str] , __A :Node | None ) -> int:
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self :Tuple ) -> Iterator[int]:
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCamelCase = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "vit_mae"
def __init__( self :int , __A :str=768 , __A :str=12 , __A :List[Any]=12 , __A :Optional[Any]=3072 , __A :Optional[int]="gelu" , __A :List[str]=0.0 , __A :Optional[int]=0.0 , __A :Dict=0.0_2 , __A :List[str]=1E-12 , __A :List[Any]=224 , __A :Any=16 , __A :Tuple=3 , __A :Optional[Any]=True , __A :int=16 , __A :int=512 , __A :int=8 , __A :List[str]=2048 , __A :Any=0.7_5 , __A :Dict=False , **__A :int , ) -> List[str]:
"""simple docstring"""
super().__init__(**__A )
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = qkv_bias
SCREAMING_SNAKE_CASE__ = decoder_num_attention_heads
SCREAMING_SNAKE_CASE__ = decoder_hidden_size
SCREAMING_SNAKE_CASE__ = decoder_num_hidden_layers
SCREAMING_SNAKE_CASE__ = decoder_intermediate_size
SCREAMING_SNAKE_CASE__ = mask_ratio
SCREAMING_SNAKE_CASE__ = norm_pix_loss | 6 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "OwlViTImageProcessor"
lowerCamelCase_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self :Optional[Any] , __A :int=None , __A :Optional[int]=None , **__A :str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __A , )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" )
SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__A , __A )
def __call__( self :str , __A :Dict=None , __A :List[str]=None , __A :str=None , __A :Optional[int]="max_length" , __A :Tuple="np" , **__A :int ) -> Tuple:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )):
SCREAMING_SNAKE_CASE__ = [self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )]
elif isinstance(__A , __A ) and isinstance(text[0] , __A ):
SCREAMING_SNAKE_CASE__ = []
# Maximum number of queries across batch
SCREAMING_SNAKE_CASE__ = max([len(__A ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__A ) != max_num_queries:
SCREAMING_SNAKE_CASE__ = t + [""" """] * (max_num_queries - len(__A ))
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )
encodings.append(__A )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
SCREAMING_SNAKE_CASE__ = BatchEncoding()
SCREAMING_SNAKE_CASE__ = input_ids
SCREAMING_SNAKE_CASE__ = attention_mask
if query_images is not None:
SCREAMING_SNAKE_CASE__ = BatchEncoding()
SCREAMING_SNAKE_CASE__ = self.image_processor(
__A , return_tensors=__A , **__A ).pixel_values
SCREAMING_SNAKE_CASE__ = query_pixel_values
if images is not None:
SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def _snake_case ( self :List[Any] , *__A :Dict , **__A :Dict ) -> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process(*__A , **__A )
def _snake_case ( self :Optional[int] , *__A :Dict , **__A :List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*__A , **__A )
def _snake_case ( self :str , *__A :List[str] , **__A :Union[str, Any] ) -> Any:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*__A , **__A )
def _snake_case ( self :Dict , *__A :List[str] , **__A :List[str] ) -> int:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :Dict , *__A :Dict , **__A :List[str] ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , )
return self.image_processor_class
@property
def _snake_case ( self :Any ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , )
return self.image_processor | 6 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: tuple[int, int] , UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = position
SCREAMING_SNAKE_CASE__ = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
SCREAMING_SNAKE_CASE__ = []
for position in positions:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(UpperCamelCase__ )
return permissible_positions
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list[list[int]] ):
return not any(elem == 0 for row in board for elem in row )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list[list[int]] , UpperCamelCase__: tuple[int, int] , UpperCamelCase__: int ):
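    # Backtracking: write the next move number on each reachable empty square,
    # recurse, and erase the move again if no complete tour follows.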
if is_complete(UpperCamelCase__ ):
return True
for position in get_valid_pos(UpperCamelCase__ , len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = position
if board[y][x] == 0:
SCREAMING_SNAKE_CASE__ = curr + 1
if open_knight_tour_helper(UpperCamelCase__ , UpperCamelCase__ , curr + 1 ):
return True
SCREAMING_SNAKE_CASE__ = 0
return False
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = [[0 for i in range(UpperCamelCase__ )] for j in range(UpperCamelCase__ )]
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = 1
if open_knight_tour_helper(UpperCamelCase__ , (i, j) , 1 ):
return board
SCREAMING_SNAKE_CASE__ = 0
    SCREAMING_SNAKE_CASE__ = f'''Open Knight Tour cannot be performed on a board of size {n}'''
raise ValueError(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = 42
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ):
@register_to_config
def __init__( self :Union[str, Any] , __A :int = 3 , __A :int = 3 , __A :Tuple[str] = ("DownEncoderBlock2D",) , __A :Tuple[str] = ("UpDecoderBlock2D",) , __A :Tuple[int] = (64,) , __A :int = 1 , __A :str = "silu" , __A :int = 3 , __A :int = 32 , __A :int = 256 , __A :int = 32 , __A :Optional[int] = None , __A :float = 0.1_8_2_1_5 , __A :str = "group" , ) -> Any:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
SCREAMING_SNAKE_CASE__ = Encoder(
in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , )
SCREAMING_SNAKE_CASE__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 )
SCREAMING_SNAKE_CASE__ = VectorQuantizer(__A , __A , beta=0.2_5 , remap=__A , sane_index_shape=__A )
SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 )
# pass init params to Decoder
SCREAMING_SNAKE_CASE__ = Decoder(
in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , )
@apply_forward_hook
def _snake_case ( self :Union[str, Any] , __A :torch.FloatTensor , __A :bool = True ) -> VQEncoderOutput:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.encoder(__A )
SCREAMING_SNAKE_CASE__ = self.quant_conv(__A )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__A )
@apply_forward_hook
def _snake_case ( self :Tuple , __A :torch.FloatTensor , __A :bool = False , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if not force_not_quantize:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.quantize(__A )
else:
SCREAMING_SNAKE_CASE__ = h
SCREAMING_SNAKE_CASE__ = self.post_quant_conv(__A )
SCREAMING_SNAKE_CASE__ = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
def _snake_case ( self :int , __A :torch.FloatTensor , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = sample
SCREAMING_SNAKE_CASE__ = self.encode(__A ).latents
SCREAMING_SNAKE_CASE__ = self.decode(__A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A ) | 6 | 1 |
import argparse
import struct
import unittest
class UpperCamelCase_ :
def __init__( self :Dict , __A :bytes ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = data
# Initialize hash values
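        # (first 32 bits of the fractional parts of the square roots of the first 8 primes)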
SCREAMING_SNAKE_CASE__ = [
0X6A_09_E6_67,
0XBB_67_AE_85,
0X3C_6E_F3_72,
0XA5_4F_F5_3A,
0X51_0E_52_7F,
0X9B_05_68_8C,
0X1F_83_D9_AB,
0X5B_E0_CD_19,
]
# Initialize round constants
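        # (first 32 bits of the fractional parts of the cube roots of the first 64 primes)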
SCREAMING_SNAKE_CASE__ = [
0X42_8A_2F_98,
0X71_37_44_91,
0XB5_C0_FB_CF,
0XE9_B5_DB_A5,
0X39_56_C2_5B,
0X59_F1_11_F1,
0X92_3F_82_A4,
0XAB_1C_5E_D5,
0XD8_07_AA_98,
0X12_83_5B_01,
0X24_31_85_BE,
0X55_0C_7D_C3,
0X72_BE_5D_74,
0X80_DE_B1_FE,
0X9B_DC_06_A7,
0XC1_9B_F1_74,
0XE4_9B_69_C1,
0XEF_BE_47_86,
0X0F_C1_9D_C6,
0X24_0C_A1_CC,
0X2D_E9_2C_6F,
0X4A_74_84_AA,
0X5C_B0_A9_DC,
0X76_F9_88_DA,
0X98_3E_51_52,
0XA8_31_C6_6D,
0XB0_03_27_C8,
0XBF_59_7F_C7,
0XC6_E0_0B_F3,
0XD5_A7_91_47,
0X06_CA_63_51,
0X14_29_29_67,
0X27_B7_0A_85,
0X2E_1B_21_38,
0X4D_2C_6D_FC,
0X53_38_0D_13,
0X65_0A_73_54,
0X76_6A_0A_BB,
0X81_C2_C9_2E,
0X92_72_2C_85,
0XA2_BF_E8_A1,
0XA8_1A_66_4B,
0XC2_4B_8B_70,
0XC7_6C_51_A3,
0XD1_92_E8_19,
0XD6_99_06_24,
0XF4_0E_35_85,
0X10_6A_A0_70,
0X19_A4_C1_16,
0X1E_37_6C_08,
0X27_48_77_4C,
0X34_B0_BC_B5,
0X39_1C_0C_B3,
0X4E_D8_AA_4A,
0X5B_9C_CA_4F,
0X68_2E_6F_F3,
0X74_8F_82_EE,
0X78_A5_63_6F,
0X84_C8_78_14,
0X8C_C7_02_08,
0X90_BE_FF_FA,
0XA4_50_6C_EB,
0XBE_F9_A3_F7,
0XC6_71_78_F2,
]
SCREAMING_SNAKE_CASE__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _snake_case ( __A :bytes ) -> bytes:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = b"""\x80""" + (b"""\x00""" * (63 - (len(__A ) + 8) % 64))
SCREAMING_SNAKE_CASE__ = struct.pack(""">Q""" , (len(__A ) * 8) )
return data + padding + big_endian_integer
def _snake_case ( self :Dict ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
SCREAMING_SNAKE_CASE__ = list(struct.unpack(""">16L""" , __A ) )
# add 48 0-ed integers
words += [0] * 48
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
SCREAMING_SNAKE_CASE__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
SCREAMING_SNAKE_CASE__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
SCREAMING_SNAKE_CASE__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_00_00_00_00
# Compression
SCREAMING_SNAKE_CASE__ = self.ror(__A , 6 ) ^ self.ror(__A , 11 ) ^ self.ror(__A , 25 )
SCREAMING_SNAKE_CASE__ = (e & f) ^ ((~e & 0XFF_FF_FF_FF) & g)
SCREAMING_SNAKE_CASE__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_00_00_00_00
SCREAMING_SNAKE_CASE__ = self.ror(__A , 2 ) ^ self.ror(__A , 13 ) ^ self.ror(__A , 22 )
SCREAMING_SNAKE_CASE__ = (a & b) ^ (a & c) ^ (b & c)
SCREAMING_SNAKE_CASE__ = (sa + maj) % 0X1_00_00_00_00
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = (
g,
f,
e,
((d + tempa) % 0X1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0X1_00_00_00_00),
)
SCREAMING_SNAKE_CASE__ = [a, b, c, d, e, f, g, h]
# Modify final values
SCREAMING_SNAKE_CASE__ = [
((element + mutated_hash_values[index]) % 0X1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
SCREAMING_SNAKE_CASE__ = """""".join([hex(__A )[2:].zfill(8 ) for value in self.hashes] )
def _snake_case ( self :Any , __A :int , __A :int ) -> int:
"""simple docstring"""
return 0XFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Tuple ) -> None:
"""simple docstring"""
import hashlib
SCREAMING_SNAKE_CASE__ = bytes("""Test String""" , """utf-8""" )
self.assertEqual(SHAaaa(__A ).hash , hashlib.shaaaa(__A ).hexdigest() )
def SCREAMING_SNAKE_CASE__ ( ):
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
else:
SCREAMING_SNAKE_CASE__ = bytes(UpperCamelCase__ , """utf-8""" )
print(SHAaaa(UpperCamelCase__ ).hash )
if __name__ == "__main__":
main() | 6 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = 42
lowerCamelCase_ = jnp.floataa
lowerCamelCase_ = True
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE__ = nn.Dense(5 , dtype=self.dtype )
def __call__( self :List[Any] , *__A :int , **__A :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = super().__call__(*__A , **__A )
SCREAMING_SNAKE_CASE__ = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = FlaxBigBirdForNaturalQuestionsModule
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ):
def cross_entropy(UpperCamelCase__: List[str] , UpperCamelCase__: List[str] , UpperCamelCase__: List[str]=None ):
SCREAMING_SNAKE_CASE__ = logits.shape[-1]
SCREAMING_SNAKE_CASE__ = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype("""f4""" )
SCREAMING_SNAKE_CASE__ = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 )
SCREAMING_SNAKE_CASE__ = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
SCREAMING_SNAKE_CASE__ = reduction(UpperCamelCase__ )
return loss
SCREAMING_SNAKE_CASE__ = partial(UpperCamelCase__ , reduction=jnp.mean )
SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
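    # Average the three cross-entropy terms: answer start, answer end, and answer category.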
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class UpperCamelCase_ :
lowerCamelCase_ = "google/bigbird-roberta-base"
lowerCamelCase_ = 30_00
lowerCamelCase_ = 1_05_00
lowerCamelCase_ = 1_28
lowerCamelCase_ = 3
lowerCamelCase_ = 1
lowerCamelCase_ = 5
# tx_args
lowerCamelCase_ = 3e-5
lowerCamelCase_ = 0.0
lowerCamelCase_ = 2_00_00
lowerCamelCase_ = 0.0095
lowerCamelCase_ = "bigbird-roberta-natural-questions"
lowerCamelCase_ = "training-expt"
lowerCamelCase_ = "data/nq-training.jsonl"
lowerCamelCase_ = "data/nq-validation.jsonl"
def _snake_case ( self :str ) -> Optional[int]:
"""simple docstring"""
os.makedirs(self.base_dir , exist_ok=__A )
SCREAMING_SNAKE_CASE__ = os.path.join(self.base_dir , self.save_dir )
SCREAMING_SNAKE_CASE__ = self.batch_size_per_device * jax.device_count()
@dataclass
class UpperCamelCase_ :
lowerCamelCase_ = 42
lowerCamelCase_ = 40_96 # no dynamic padding on TPUs
def __call__( self :Optional[Any] , __A :Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.collate_fn(__A )
SCREAMING_SNAKE_CASE__ = jax.tree_util.tree_map(__A , __A )
return batch
def _snake_case ( self :List[Any] , __A :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.fetch_inputs(features["""input_ids"""] )
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": jnp.array(__A , dtype=jnp.intaa ),
"""attention_mask""": jnp.array(__A , dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ),
}
return batch
def _snake_case ( self :Tuple , __A :list ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self._fetch_inputs(__A ) for ids in input_ids]
return zip(*__A )
def _snake_case ( self :List[str] , __A :list ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [1 for _ in range(len(__A ) )]
while len(__A ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any]=None ):
if seed is not None:
SCREAMING_SNAKE_CASE__ = dataset.shuffle(seed=UpperCamelCase__ )
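    # Integer division drops any trailing partial batch, so every yielded batch is full-size.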
for i in range(len(UpperCamelCase__ ) // batch_size ):
SCREAMING_SNAKE_CASE__ = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(UpperCamelCase__ )
@partial(jax.pmap , axis_name="""batch""" )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] , **UpperCamelCase__: Optional[int] ):
def loss_fn(UpperCamelCase__: List[Any] ):
SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" )
SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" )
SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" )
SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=UpperCamelCase__ , dropout_rng=UpperCamelCase__ , train=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs
return state.loss_fn(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = jax.random.split(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = jax.value_and_grad(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = grad_fn(state.params )
SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
SCREAMING_SNAKE_CASE__ = jax.lax.pmean(UpperCamelCase__ , """batch""" )
SCREAMING_SNAKE_CASE__ = state.apply_gradients(grads=UpperCamelCase__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , **UpperCamelCase__: Dict ):
SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" )
SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" )
SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" )
SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=state.params , train=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs
SCREAMING_SNAKE_CASE__ = state.loss_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class UpperCamelCase_ ( train_state.TrainState ):
lowerCamelCase_ = struct.field(pytree_node=UpperCamelCase__ )
@dataclass
class UpperCamelCase_ :
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = None
def _snake_case ( self :List[Any] , __A :str , __A :str , __A :str , __A :Tuple=None ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = model.params
SCREAMING_SNAKE_CASE__ = TrainState.create(
apply_fn=model.__call__ , params=__A , tx=__A , loss_fn=__A , )
if ckpt_dir is not None:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = restore_checkpoint(__A , __A )
SCREAMING_SNAKE_CASE__ = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = build_tx(**__A )
SCREAMING_SNAKE_CASE__ = train_state.TrainState(
step=__A , apply_fn=model.__call__ , params=__A , tx=__A , opt_state=__A , )
SCREAMING_SNAKE_CASE__ = args
SCREAMING_SNAKE_CASE__ = data_collator
SCREAMING_SNAKE_CASE__ = lr
SCREAMING_SNAKE_CASE__ = params
SCREAMING_SNAKE_CASE__ = jax_utils.replicate(__A )
return state
def _snake_case ( self :Optional[Any] , __A :Optional[int] , __A :int , __A :int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.args
SCREAMING_SNAKE_CASE__ = len(__A ) // args.batch_size
SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ = jax.random.split(__A , jax.device_count() )
for epoch in range(args.max_epochs ):
SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , args.batch_size , seed=__A )
SCREAMING_SNAKE_CASE__ = 0
for batch in tqdm(__A , total=__A , desc=f'''Running EPOCH-{epoch}''' ):
SCREAMING_SNAKE_CASE__ = self.data_collator(__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.train_step_fn(__A , __A , **__A )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(state.step )
SCREAMING_SNAKE_CASE__ = running_loss.item() / i
SCREAMING_SNAKE_CASE__ = self.scheduler_fn(state_step - 1 )
SCREAMING_SNAKE_CASE__ = self.evaluate(__A , __A )
SCREAMING_SNAKE_CASE__ = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(__A ) )
self.logger.log(__A , commit=__A )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=__A )
def _snake_case ( self :List[str] , __A :Dict , __A :str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , self.args.batch_size )
SCREAMING_SNAKE_CASE__ = len(__A ) // self.args.batch_size
SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ = 0
for batch in tqdm(__A , total=__A , desc="""Evaluating ... """ ):
SCREAMING_SNAKE_CASE__ = self.data_collator(__A )
SCREAMING_SNAKE_CASE__ = self.val_step_fn(__A , **__A )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
def _snake_case ( self :List[Any] , __A :Any , __A :Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(__A )
print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... """ )
self.model_save_fn(__A , params=state.params )
with open(os.path.join(__A , """opt_state.msgpack""" ) , """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(__A , """args.joblib""" ) )
joblib.dump(self.data_collator , os.path.join(__A , """data_collator.joblib""" ) )
with open(os.path.join(__A , """training_state.json""" ) , """w""" ) as f:
json.dump({"""step""": state.step.item()} , __A )
print("""DONE""" )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ):
print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=""" ... """ )
with open(os.path.join(UpperCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ = from_bytes(state.params , f.read() )
with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ = from_bytes(state.opt_state , f.read() )
SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """args.joblib""" ) )
SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) )
with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """r""" ) as f:
SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ):
SCREAMING_SNAKE_CASE__ = num_train_steps - warmup_steps
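    # Linear warmup up to the peak learning rate, then linear decay down to 1e-7.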
SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=UpperCamelCase__ , transition_steps=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=1e-7 , transition_steps=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple ):
def weight_decay_mask(UpperCamelCase__: Any ):
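        # Apply weight decay everywhere except biases and LayerNorm scale parameters.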
SCREAMING_SNAKE_CASE__ = traverse_util.flatten_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = scheduler_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = optax.adamw(learning_rate=UpperCamelCase__ , weight_decay=UpperCamelCase__ , mask=UpperCamelCase__ )
return tx, lr | 6 | 1 |
from torch import nn
class UpperCamelCase_ ( nn.Module ):
def __init__( self :List[Any] , __A :List[str] , __A :List[Any] ) -> List[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = class_size
SCREAMING_SNAKE_CASE__ = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
SCREAMING_SNAKE_CASE__ = nn.Linear(__A , __A )
def _snake_case ( self :Tuple , __A :Optional[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.mlp(__A )
return logits | 6 |
from torch import nn
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' ) | 6 | 1 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] ):
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] ):
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE__ = JsonDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_json_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: Tuple ):
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE__ = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ = JsonDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_json_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Tuple , UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
SCREAMING_SNAKE_CASE__ = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ = JsonDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] ):
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE__ = JsonDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , split=UpperCamelCase__ ).read()
_check_json_dataset(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: int , UpperCamelCase__: List[Any] ):
if issubclass(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = jsonl_path
elif issubclass(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = [jsonl_path]
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE__ = JsonDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_json_dataset(UpperCamelCase__ , UpperCamelCase__ )
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: List[str] ):
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE__ = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_json_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[Any] ):
if split:
SCREAMING_SNAKE_CASE__ = {split: jsonl_path}
else:
SCREAMING_SNAKE_CASE__ = """train"""
SCREAMING_SNAKE_CASE__ = {"""train""": jsonl_path, """test""": jsonl_path}
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE__ = JsonDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_json_datasetdict(UpperCamelCase__ , UpperCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
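
# A minimal round-trip sketch of the writer under test; `ds` stands in for the 10-row
# `dataset` fixture used by the test class below (an illustrative sketch, not part of
# the test suite):
def _demo_jsonl_roundtrip(ds):
    with io.BytesIO() as buffer:
        JsonDatasetWriter(ds, buffer, lines=True).write()  # one JSON object per line
        buffer.seek(0)
        rows = load_json_lines(buffer)
    assert len(rows) == ds.num_rows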
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def _snake_case ( self :Union[str, Any] , __A :Tuple , __A :Dict , __A :Any ) -> int:
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__A , __A , lines=__A , num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE__ = load_json_function(__A )
assert isinstance(__A , __A )
assert isinstance(exported_content[0] , __A )
assert len(__A ) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def _snake_case ( self :Dict , __A :List[Any] , __A :Any , __A :str , __A :Tuple , __A :Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / f'''test.json.{extension}'''
SCREAMING_SNAKE_CASE__ = str(shared_datadir / f'''test_file.json.{extension}''' )
JsonDatasetWriter(__A , __A , compression=__A ).write()
with fsspec.open(__A , """rb""" , compression="""infer""" ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
with fsspec.open(__A , """rb""" , compression="""infer""" ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
assert exported_content == original_content | 6 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
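
# Sketch of the q/k/v split performed below (shapes only; dim=96 is an illustrative
# value for the first Swin-tiny stage): the original checkpoint stores one fused
# projection of shape (3*dim, dim), which is sliced into equal thirds, e.g.
#
#     qkv_weight = state_dict["backbone.layers.0.blocks.0.attn.qkv.weight"]  # (288, 96)
#     query, key, value = qkv_weight[:96], qkv_weight[96:192], qkv_weight[-96:]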
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
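
# This is the standard COCO val2017 test image (two cats on a couch) used across the
# conversion scripts to eyeball that the converted model produces sensible outputs.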
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
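
# Example invocation (paths are placeholders, and the script name is whatever this
# file is saved as):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade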
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_lowerCamelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
def create_ngram(sentence: str, ngram_size: int) -> list:
    """Create all character n-grams of size `ngram_size` from `sentence`.

    >>> create_ngram("abc", 2)
    ['ab', 'bc']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs, )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size
    def _tokenize(self, text: str) -> List[str]:
        return list(text)
    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")
    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")
    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE operates directly on Unicode codepoints, so there is no vocabulary file to save.
        return ()
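
# Minimal usage sketch (assuming the class above is exported as `CanineTokenizer`):
# every character maps directly to its Unicode codepoint, wrapped in the [CLS]/[SEP]
# pseudo-characters defined above.
#
#     tokenizer = CanineTokenizer()
#     tokenizer("hi")["input_ids"]  # [0xE000, ord("h"), ord("i"), 0xE001]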
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
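
# Shared tester mixin for the DeepFloyd IF pipelines: it builds tiny dummy components
# (T5 text encoder, UNet, schedulers, watermarker) and exercises save/load round-trips
# on whatever `pipeline_class` the concrete test case defines.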
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
"""simple docstring"""
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = T5EncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = UNet2DConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _get_superresolution_dummy_components(self):
"""simple docstring"""
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = T5EncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = UNet2DConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _test_save_load_optional_components(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A )
SCREAMING_SNAKE_CASE__ = inputs["""prompt"""]
SCREAMING_SNAKE_CASE__ = inputs["""generator"""]
SCREAMING_SNAKE_CASE__ = inputs["""num_inference_steps"""]
SCREAMING_SNAKE_CASE__ = inputs["""output_type"""]
if "image" in inputs:
SCREAMING_SNAKE_CASE__ = inputs["""image"""]
else:
SCREAMING_SNAKE_CASE__ = None
if "mask_image" in inputs:
SCREAMING_SNAKE_CASE__ = inputs["""mask_image"""]
else:
SCREAMING_SNAKE_CASE__ = None
if "original_image" in inputs:
SCREAMING_SNAKE_CASE__ = inputs["""original_image"""]
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pipe.encode_prompt(__A )
# inputs with prompt converted to embeddings
SCREAMING_SNAKE_CASE__ = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
SCREAMING_SNAKE_CASE__ = image
if mask_image is not None:
SCREAMING_SNAKE_CASE__ = mask_image
if original_image is not None:
SCREAMING_SNAKE_CASE__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__A , __A , __A )
SCREAMING_SNAKE_CASE__ = pipe(**__A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__A )
SCREAMING_SNAKE_CASE__ = self.pipeline_class.from_pretrained(__A )
pipe_loaded.to(__A )
pipe_loaded.set_progress_bar_config(disable=__A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__A , __A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A )
SCREAMING_SNAKE_CASE__ = inputs["""generator"""]
SCREAMING_SNAKE_CASE__ = inputs["""num_inference_steps"""]
SCREAMING_SNAKE_CASE__ = inputs["""output_type"""]
# inputs with prompt converted to embeddings
SCREAMING_SNAKE_CASE__ = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
SCREAMING_SNAKE_CASE__ = image
if mask_image is not None:
SCREAMING_SNAKE_CASE__ = mask_image
if original_image is not None:
SCREAMING_SNAKE_CASE__ = original_image
SCREAMING_SNAKE_CASE__ = pipe_loaded(**__A )[0]
SCREAMING_SNAKE_CASE__ = np.abs(to_np(__A ) - to_np(__A ) ).max()
self.assertLess(__A , 1E-4 )
    def _test_save_load_local(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A )
SCREAMING_SNAKE_CASE__ = pipe(**__A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__A )
SCREAMING_SNAKE_CASE__ = self.pipeline_class.from_pretrained(__A )
pipe_loaded.to(__A )
pipe_loaded.set_progress_bar_config(disable=__A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A )
SCREAMING_SNAKE_CASE__ = pipe_loaded(**__A )[0]
SCREAMING_SNAKE_CASE__ = np.abs(to_np(__A ) - to_np(__A ) ).max()
        self.assertLess(__A , 1E-4 )
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = 'bert-base-cased'
FP16 = 'fp16'
BF16 = 'bf16'
dtypes = [FP16, BF16]
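
# `FullyShardedDataParallelPlugin` is configured through environment variables (the
# same ones `accelerate launch` sets), which is why the tests below patch the
# environment with `mockenv_context` rather than passing arguments directly.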
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
"""simple docstring"""
super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1", )
    def test_sharding_strategy(self):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy  # second env key is an assumption recovered from context
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
"""simple docstring"""
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
"""simple docstring"""
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
"""simple docstring"""
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config)
for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
"""simple docstring"""
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = feat_extract_norm
SCREAMING_SNAKE_CASE__ = feat_extract_activation
SCREAMING_SNAKE_CASE__ = list(__A )
SCREAMING_SNAKE_CASE__ = list(__A )
SCREAMING_SNAKE_CASE__ = list(__A )
SCREAMING_SNAKE_CASE__ = conv_bias
SCREAMING_SNAKE_CASE__ = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE__ = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE__ = len(self.conv_dim )
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_dropout
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = activation_dropout
SCREAMING_SNAKE_CASE__ = feat_proj_dropout
SCREAMING_SNAKE_CASE__ = final_dropout
SCREAMING_SNAKE_CASE__ = layerdrop
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_ctc_classes
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = do_stable_layer_norm
SCREAMING_SNAKE_CASE__ = use_weighted_layer_sum
SCREAMING_SNAKE_CASE__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE__ = apply_spec_augment
SCREAMING_SNAKE_CASE__ = mask_time_prob
SCREAMING_SNAKE_CASE__ = mask_time_length
SCREAMING_SNAKE_CASE__ = mask_time_min_masks
SCREAMING_SNAKE_CASE__ = mask_feature_prob
SCREAMING_SNAKE_CASE__ = mask_feature_length
SCREAMING_SNAKE_CASE__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE__ = num_codevectors_per_group
SCREAMING_SNAKE_CASE__ = num_codevector_groups
SCREAMING_SNAKE_CASE__ = contrastive_logits_temperature
SCREAMING_SNAKE_CASE__ = feat_quantizer_dropout
SCREAMING_SNAKE_CASE__ = num_negatives
SCREAMING_SNAKE_CASE__ = codevector_dim
SCREAMING_SNAKE_CASE__ = proj_codevector_dim
SCREAMING_SNAKE_CASE__ = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE__ = ctc_loss_reduction
SCREAMING_SNAKE_CASE__ = ctc_zero_infinity
# pretraining loss
SCREAMING_SNAKE_CASE__ = replace_prob
@property
    def inputs_to_logits_ratio(self):
"""simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
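
    # Worked example: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this
    # property evaluates to 5 * 2**6 = 320, i.e. one encoder frame per 320 input
    # samples (20 ms of audio at a 16 kHz sampling rate).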
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
_lowerCamelCase = logging.get_logger(__name__)
# General docstring
_lowerCamelCase = 'PoolFormerConfig'
# Base docstring
_lowerCamelCase = 'sail/poolformer_s12'
_lowerCamelCase = [1, 512, 7, 7]
# Image classification docstring
_lowerCamelCase = 'sail/poolformer_s12'
_lowerCamelCase = 'tabby, tabby cat'
_lowerCamelCase = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False):
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
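
# Stochastic depth in a nutshell: during training each sample's residual branch is
# zeroed with probability `drop_prob`, and the survivors are scaled by 1 / (1 - drop_prob)
# so the branch keeps the same expected value.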
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with an optional normalization layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)
        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()
            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)
            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()
            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output
            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)
        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
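# Hedged illustration of the stochastic depth decay rule used in the encoder
# above: drop probabilities grow linearly with layer index. The `depths` and
# `drop_path_rate` values below are assumptions (poolformer_s12-style), not
# read from any config.
depths = (2, 2, 6, 2)   # assumed config.depths
drop_path_rate = 0.1    # assumed config.drop_path_rate
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# early layers get ~0.0; the last layer gets the full drop_path_rate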
class PoolFormerPreTrainedModel(PreTrainedModel):
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = R'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
POOLFORMER_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)
        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) | 6 | 1 |
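# A hedged end-to-end usage sketch for the classification model above. The
# checkpoint name comes from the docstring constants in this file; the sample
# image URL is an assumption (a common COCO test image), and the snippet needs
# network access to download weights.
from PIL import Image as PILImage
import requests
from transformers import AutoImageProcessor, PoolFormerForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # assumed sample image
image = PILImage.open(requests.get(url, stream=True).raw)
processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"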
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
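# A small worked example for the NPV helper above (the function name is a
# best-guess restoration of the mangled original; the arithmetic is standard):
# an initial outlay of -1000 followed by three annual inflows of 500,
# discounted at 10%.
flows = [-1000.0, 500.0, 500.0, 500.0]
# 500/1.1 + 500/1.1**2 + 500/1.1**3 - 1000 = 243.43 (rounded to 2 digits)
print(net_present_value(0.10, flows))  # 243.43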
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) | 6 | 1 |
def find_min(arr):
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff | 6 |
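# Worked example for the partition helper above (the function name is a
# best-guess restoration; the table itself is the standard minimum
# subset-sum-difference DP): [1, 6, 11, 5] splits into {1, 5, 6} (sum 12)
# and {11}, so the smallest achievable difference is 1.
print(find_min([1, 6, 11, 5]))  # 1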
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(UpperCamelCase__ )
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
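# A minimal, hedged sketch of the `accumulate` pattern the tests above
# exercise: under `accelerator.accumulate(model)`, gradient syncing and the
# optimizer step effectively take effect only every
# `gradient_accumulation_steps` batches. `compute_loss` is a hypothetical
# helper, not part of accelerate.
from accelerate import Accelerator

def training_loop_sketch(model, optimizer, dataloader, compute_loss):
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        with accelerator.accumulate(model):
            loss = compute_loss(model, batch)  # hypothetical helper
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()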
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main() | 6 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('DataClass', Any)
DataClassType = NewType('DataClassType', Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]
    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )
        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]
        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]
            kwargs["type"] = make_choice_type_function(kwargs["choices"])
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs) | 6 |
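# A hedged usage sketch for the argument parser above: define a dataclass of
# arguments, then parse a CLI-style list into a typed instance. The
# `MySettings` dataclass and its fields are illustrative assumptions, not part
# of the library.
import dataclasses
from transformers import HfArgumentParser

@dataclasses.dataclass
class MySettings:  # illustrative dataclass, not part of the library
    learning_rate: float = 3e-5
    do_eval: bool = False

parser = HfArgumentParser(MySettings)
(settings,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--do_eval"])
print(settings.learning_rate, settings.do_eval)  # 0.0001 True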
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"] | 6 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
SCREAMING_SNAKE_CASE__ = pa.array([None] * len(__A ) , type=pa.binary() )
SCREAMING_SNAKE_CASE__ = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
SCREAMING_SNAKE_CASE__ = pa.array([None] * len(__A ) , type=pa.string() )
SCREAMING_SNAKE_CASE__ = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
SCREAMING_SNAKE_CASE__ = storage.field("""bytes""" )
else:
SCREAMING_SNAKE_CASE__ = pa.array([None] * len(__A ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
SCREAMING_SNAKE_CASE__ = storage.field("""path""" )
else:
SCREAMING_SNAKE_CASE__ = pa.array([None] * len(__A ) , type=pa.string() )
SCREAMING_SNAKE_CASE__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
SCREAMING_SNAKE_CASE__ = pa.array(
[encode_np_array(np.array(__A ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
SCREAMING_SNAKE_CASE__ = pa.array([None] * len(__A ) , type=pa.string() )
SCREAMING_SNAKE_CASE__ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__A , self.pa_type )
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(__A :Tuple ):
with xopen(__A , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
return bytes_
SCREAMING_SNAKE_CASE__ = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
SCREAMING_SNAKE_CASE__ = pa.array(
[os.path.basename(__A ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
SCREAMING_SNAKE_CASE__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__A , self.pa_type )
def list_image_compression_formats() -> list:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: "PIL.Image.Image" ):
SCREAMING_SNAKE_CASE__ = BytesIO()
if image.format in list_image_compression_formats():
SCREAMING_SNAKE_CASE__ = image.format
else:
SCREAMING_SNAKE_CASE__ = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(UpperCamelCase__ , format=UpperCamelCase__ )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: "PIL.Image.Image" ):
if hasattr(UpperCamelCase__ , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(UpperCamelCase__ )}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}"
        )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs | 6 |
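# A minimal usage sketch, assuming the feature above is datasets.Image (the
# file path is a placeholder):
from datasets import Dataset, Image as ImageFeature

ds = Dataset.from_dict({"image": ["path/to/example.png"]}).cast_column("image", ImageFeature())
pil_image = ds[0]["image"]  # decoded lazily into a PIL.Image.Image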
def find_min(arr: list) -> int:
    """Return the minimum difference between the sums of two subsets of ``arr``."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    # A sum of 0 is always reachable (take the empty subset).
    for i in range(n + 1):
        dp[i][0] = True
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # Either skip arr[i - 1] (the correct recurrence is dp[i - 1][j],
            # not dp[i][j - 1]) ...
            dp[i][j] = dp[i - 1][j]
            # ... or include it when it fits.
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = s
    # The best split puts a subset sum as close to s / 2 as possible.
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff | 6 | 1 |
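# Quick checks for find_min above: [1, 6, 11, 5] splits into {1, 5, 6} and
# {11}, so the minimum difference is 1; four equal values split evenly.
assert find_min([1, 6, 11, 5]) == 1
assert find_min([5, 5, 5, 5]) == 0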
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--mobilebert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained MobileBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path) | 6 |
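# Example invocation (the script name and all paths are placeholders):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert-pytorch/pytorch_model.bin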
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v**2 of a body with mass (kg) and velocity (m/s)."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True) | 6 | 1 |
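# Worked check for kinetic_energy above: E_k = 0.5 * m * v**2, so a 10 kg body
# at 10 m/s carries 0.5 * 10 * 100 = 500 J; the velocity sign is irrelevant
# because the speed enters squared.
assert kinetic_energy(10, 10) == 500.0
assert kinetic_energy(2, -2) == 4.0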
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''')
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamic import to avoid a circular dependency
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )["input_ids"]
        model_inputs["labels"] = labels
        return model_inputs | 6 |
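# A minimal usage sketch, assuming the class above is transformers.RagTokenizer
# (facebook/rag-token-base is a real RAG checkpoint on the Hub):
from transformers import RagTokenizer

rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
rag_inputs = rag_tokenizer("who holds the record in 100m freestyle?", return_tensors="pt")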
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 6 | 1 |
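# A minimal usage sketch, assuming the class above is
# transformers.EncoderDecoderConfig: from_encoder_decoder_configs flips the
# decoder into cross-attention mode before composing the joint config.
from transformers import BertConfig, EncoderDecoderConfig

joint_config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
assert joint_config.decoder.is_decoder and joint_config.decoder.add_cross_attention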
import string


def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:  # uppercase A-Z
            output += chr(155 - extract)
        elif 97 <= extract <= 122:  # lowercase a-z
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)


def benchmark() -> None:
    """Print the runtime of both implementations on string.printable."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f'''> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds''')
    print(f'''> atbash(): {timeit('atbash(printable)', setup=setup)} seconds''')


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(F'''{example} encrypted in atbash: {atbash(example)}''')
    benchmark() | 6 |
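# The Atbash cipher is an involution, which gives a cheap sanity check for
# both implementations above: applying it twice returns the input.
assert atbash("abc") == atbash_slow("abc") == "zyx"
assert atbash(atbash("Hello, World!")) == "Hello, World!"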
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        } | 6 | 1 |
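# A minimal usage sketch, assuming the dataclass above is
# datasets.tasks.TextClassification (the task API was removed in recent
# datasets releases): align_with_features copies the dataset's concrete
# ClassLabel into the template's label schema.
from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

feats = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
template = TextClassification(text_column="text", label_column="labels")
aligned = template.align_with_features(feats)
assert aligned.label_schema["labels"].num_classes == 2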
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for v in tree.values():
shapes.extend(_fetch_dims(UpperCamelCase__ ) )
elif isinstance(UpperCamelCase__ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(UpperCamelCase__ ) )
elif isinstance(UpperCamelCase__ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("""Not supported""" )
return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(UpperCamelCase__: List[bool] ) -> None:
SCREAMING_SNAKE_CASE__ = True
for i in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE__ = -1 * (i + 1)
l[reversed_idx] &= tally
SCREAMING_SNAKE_CASE__ = l[reversed_idx]
if start_edges is None:
SCREAMING_SNAKE_CASE__ = [s == 0 for s in start]
reduce_edge_list(UpperCamelCase__ )
if end_edges is None:
SCREAMING_SNAKE_CASE__ = [e == (d - 1) for e, d in zip(UpperCamelCase__ , UpperCamelCase__ )]
reduce_edge_list(UpperCamelCase__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(UpperCamelCase__ ) == 0:
return [()]
elif len(UpperCamelCase__ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(UpperCamelCase__ , UpperCamelCase__ ):
if s == e:
path_list.append(slice(UpperCamelCase__ , s + 1 ) )
else:
break
SCREAMING_SNAKE_CASE__ = tuple(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase__ )
# start == end, and we're done
if divergence_idx == len(UpperCamelCase__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
SCREAMING_SNAKE_CASE__ = start[divergence_idx]
return tuple(
path + (slice(UpperCamelCase__ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
SCREAMING_SNAKE_CASE__ = end[divergence_idx]
return tuple(
path + (slice(UpperCamelCase__ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
SCREAMING_SNAKE_CASE__ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
if not (len(UpperCamelCase__ ) > 0):
raise ValueError("""Must provide at least one input""" )
SCREAMING_SNAKE_CASE__ = [shape[:no_batch_dims] for shape in _fetch_dims(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ = tuple([max(UpperCamelCase__ ) for s in zip(*UpperCamelCase__ )] )
def _prep_inputs(UpperCamelCase__: torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
SCREAMING_SNAKE_CASE__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
SCREAMING_SNAKE_CASE__ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
SCREAMING_SNAKE_CASE__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
SCREAMING_SNAKE_CASE__ = tensor_tree_map(_prep_inputs , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = None
if _out is not None:
SCREAMING_SNAKE_CASE__ = tensor_tree_map(lambda UpperCamelCase__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
SCREAMING_SNAKE_CASE__ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
SCREAMING_SNAKE_CASE__ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(UpperCamelCase__: torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = prepped_outputs
for _ in range(UpperCamelCase__ ):
# Chunk the input
if not low_mem:
SCREAMING_SNAKE_CASE__ = _select_chunk
else:
SCREAMING_SNAKE_CASE__ = partial(
_chunk_slice , flat_start=UpperCamelCase__ , flat_end=min(UpperCamelCase__ , i + chunk_size ) , no_batch_dims=len(UpperCamelCase__ ) , )
SCREAMING_SNAKE_CASE__ = tensor_tree_map(UpperCamelCase__ , UpperCamelCase__ )
# Run the layer on the chunk
SCREAMING_SNAKE_CASE__ = layer(**UpperCamelCase__ )
# Allocate space for the output
if out is None:
SCREAMING_SNAKE_CASE__ = tensor_tree_map(lambda UpperCamelCase__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , UpperCamelCase__ )
# Put the chunk in its pre-allocated space
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
def assign(UpperCamelCase__: dict , UpperCamelCase__: dict ) -> None:
for k, v in da.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
assign(UpperCamelCase__ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
SCREAMING_SNAKE_CASE__ = da[k]
assign(UpperCamelCase__ , UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for xa, xa in zip(UpperCamelCase__ , UpperCamelCase__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
SCREAMING_SNAKE_CASE__ = xa
elif isinstance(UpperCamelCase__ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
SCREAMING_SNAKE_CASE__ = output_chunk
else:
raise ValueError("""Not supported""" )
i += chunk_size
SCREAMING_SNAKE_CASE__ = tensor_tree_map(lambda UpperCamelCase__ : t.view(orig_batch_dims + t.shape[1:] ) , UpperCamelCase__ )
return out
class UpperCamelCase_ :
def __init__( self :Optional[Any] , __A :int = 512 , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = max_chunk_size
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
def _snake_case ( self :Dict , __A :Callable , __A :tuple , __A :int ) -> int:
"""simple docstring"""
logging.info("""Tuning chunk size...""" )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
SCREAMING_SNAKE_CASE__ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
SCREAMING_SNAKE_CASE__ = [c for c in candidates if c > min_chunk_size]
SCREAMING_SNAKE_CASE__ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__A :int ) -> bool:
try:
with torch.no_grad():
fn(*__A , chunk_size=__A )
return True
except RuntimeError:
return False
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = len(__A ) - 1
while i > min_viable_chunk_size_index:
SCREAMING_SNAKE_CASE__ = test_chunk_size(candidates[i] )
if not viable:
SCREAMING_SNAKE_CASE__ = (min_viable_chunk_size_index + i) // 2
else:
SCREAMING_SNAKE_CASE__ = i
SCREAMING_SNAKE_CASE__ = (i + len(__A ) - 1) // 2
return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, cached_arg_data: Iterable, arg_data: Iterable) -> bool:
        consistent = True
        for cached_a, a in zip(cached_arg_data, arg_data):
            assert type(cached_a) == type(a)
            if isinstance(a, (list, tuple)):
                consistent &= self._compare_arg_caches(cached_a, a)
            elif isinstance(a, dict):
                cached_a_items = [v for _, v in sorted(cached_a.items(), key=lambda x: x[0])]
                a_items = [v for _, v in sorted(a.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(cached_a_items, a_items)
            else:
                consistent &= cached_a == a
        return consistent
def _snake_case ( self :int , __A :Callable , __A :tuple , __A :int , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = tree_map(lambda __A : a.shape if isinstance(__A , torch.Tensor ) else a , __A , __A )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__A )
SCREAMING_SNAKE_CASE__ = self._compare_arg_caches(self.cached_arg_data , __A )
else:
# Otherwise, we can reuse the precomputed value
SCREAMING_SNAKE_CASE__ = False
if not consistent:
SCREAMING_SNAKE_CASE__ = self._determine_favorable_chunk_size(
__A , __A , __A , )
SCREAMING_SNAKE_CASE__ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size | 6 |
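# A minimal usage sketch for chunk_layer above (the toy layer and shapes are
# made up): the flattened batch dimension is processed in chunks of 4 and the
# reassembled output matches the unchunked call.
import torch

def toy_layer(x: torch.Tensor) -> torch.Tensor:
    return x * 2.0

toy_inputs = {"x": torch.randn(16, 8)}
chunked_out = chunk_layer(toy_layer, toy_inputs, chunk_size=4, no_batch_dims=1)
assert torch.allclose(chunked_out, toy_layer(toy_inputs["x"]))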
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # split fused qkv weights into separate query/key/value entries
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 6 | 1 |
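# Example invocation (the script name and output path are placeholders):
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa \
#       --push_to_hub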
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase_ ( unittest.TestCase , UpperCamelCase__ ):
def _snake_case ( self :Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_tool("""text-to-speech""" )
self.tool.setup()
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
        result = self.tool(text="""hey""" )  # same check through the keyword argument
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) ) | 6 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_euler""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self :str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_euler""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , generator=__A , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=__A , )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 6 | 1 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'PoolFormerConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'sail/poolformer_s12'
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'sail/poolformer_s12'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct Patch Embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with 1 group.
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict:
"""simple docstring"""
        if isinstance(__A , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any:
"""simple docstring"""
if isinstance(__A , __A ):
SCREAMING_SNAKE_CASE__ = value
POOLFORMER_START_DOCSTRING = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
POOLFORMER_INPUTS_DOCSTRING = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()
def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
SCREAMING_SNAKE_CASE__ = self.encoder(
__A , output_hidden_states=__A , return_dict=__A , )
SCREAMING_SNAKE_CASE__ = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , )
class UpperCamelCase_ ( nn.Module ):
def __init__( self :int , __A :Optional[int] ) -> Tuple:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size )
def _snake_case ( self :List[Any] , __A :Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.dense(__A )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , )
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _snake_case ( self :int , __A :Optional[torch.FloatTensor] = None , __A :Optional[torch.LongTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ = self.poolformer(
__A , output_hidden_states=__A , return_dict=__A , )
SCREAMING_SNAKE_CASE__ = outputs[0]
SCREAMING_SNAKE_CASE__ = self.classifier(self.norm(__A ).mean([-2, -1] ) )
SCREAMING_SNAKE_CASE__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE__ = """single_label_classification"""
else:
SCREAMING_SNAKE_CASE__ = """multi_label_classification"""
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE__ = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE__ = CrossEntropyLoss()
SCREAMING_SNAKE_CASE__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE__ = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A )
if not return_dict:
SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states ) | 6 |
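# A minimal usage sketch, assuming the classes above mirror transformers'
# PoolFormer implementation (default config values, not a trained checkpoint):
import torch

from transformers import PoolFormerConfig, PoolFormerForImageClassification

poolformer_config = PoolFormerConfig(num_labels=10)
poolformer = PoolFormerForImageClassification(poolformer_config)
logits = poolformer(torch.randn(1, 3, 224, 224)).logits
assert logits.shape == (1, 10)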
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int = 600_851_475_143 ):
try:
SCREAMING_SNAKE_CASE__ = int(UpperCamelCase__ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
while i * i <= n:
while n % i == 0:
SCREAMING_SNAKE_CASE__ = i
n //= i
i += 1
if n > 1:
SCREAMING_SNAKE_CASE__ = n
return int(UpperCamelCase__ )
if __name__ == "__main__":
print(F'''{solution() = }''') | 6 | 1 |
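# --- Illustrative check (added, not from the source) ---
# The trial-division loop above divides out each factor in turn, so whatever
# remains (or the last divisor recorded) is the largest prime factor. A clean
# re-implementation with an assumed name, verified on two known values:
def largest_prime_factor(n: int) -> int:
    ans, i = 1, 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    return n if n > 1 else ans

assert largest_prime_factor(13_195) == 29
assert largest_prime_factor(600_851_475_143) == 6857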
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCamelCase = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) )
SCREAMING_SNAKE_CASE__ = self.diffusers_dir
shutil.copy(
os.path.join(__A , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , )
def _snake_case ( self :str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def _snake_case ( self :List[Any] , __A :Optional[int] , __A :List[str] , __A :Dict , __A :int=None ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
SCREAMING_SNAKE_CASE__ = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
SCREAMING_SNAKE_CASE__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
SCREAMING_SNAKE_CASE__ = black.format_str(__A , mode=__A )
SCREAMING_SNAKE_CASE__ = os.path.join(self.diffusers_dir , """new_code.py""" )
with open(__A , """w""" , newline="""\n""" ) as f:
f.write(__A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__A )
with open(__A , """r""" ) as f:
self.assertEqual(f.read() , __A )
def _snake_case ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(__A , __A )
def _snake_case ( self :List[str] ) -> Any:
"""simple docstring"""
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , __A , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , __A ) , )
# Copy consistency with a really long name
SCREAMING_SNAKE_CASE__ = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , f'''{long_class_name}SchedulerOutput''' , re.sub("""Bert""" , __A , __A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , __A , overwrite_result=re.sub("""DDPM""" , """Test""" , __A ) , ) | 6 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__A ) )
def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(__A ) )
def _snake_case ( self :Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertFalse(is_safetensors_compatible(__A , variant=__A ) ) | 6 | 1 |
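# --- Hedged sketch (added): my paraphrase of the rule these tests pin down,
# not the diffusers implementation. A file list is "safetensors compatible"
# when every .bin weight (for the requested variant) has a .safetensors twin;
# transformers-style weights pair pytorch_model.bin with model.safetensors.
import os

def compatible(filenames, variant=None):
    suffix = f".{variant}" if variant is not None else ""
    names = set(filenames)
    for f in filenames:
        if not f.endswith(f"{suffix}.bin"):
            continue
        folder, base = os.path.split(f)
        stem = base[: -len(f"{suffix}.bin")]
        if stem == "pytorch_model":
            stem = "model"   # transformers naming: pytorch_model.bin <-> model.safetensors
        if os.path.join(folder, f"{stem}{suffix}.safetensors") not in names:
            return False
    return True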
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "pegasus"
lowerCamelCase_ = ["past_key_values"]
lowerCamelCase_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self :Any , __A :Optional[int]=5_0265 , __A :int=1024 , __A :Union[str, Any]=12 , __A :Union[str, Any]=4096 , __A :Tuple=16 , __A :Union[str, Any]=12 , __A :Tuple=4096 , __A :int=16 , __A :List[Any]=0.0 , __A :Tuple=0.0 , __A :Dict=True , __A :List[str]=True , __A :Optional[int]="gelu" , __A :int=1024 , __A :Optional[Any]=0.1 , __A :List[Any]=0.0 , __A :str=0.0 , __A :List[str]=0.0_2 , __A :str=0 , __A :str=False , __A :List[Any]=0 , __A :int=1 , __A :str=1 , **__A :Tuple , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = d_model
SCREAMING_SNAKE_CASE__ = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ = encoder_layers
SCREAMING_SNAKE_CASE__ = encoder_attention_heads
SCREAMING_SNAKE_CASE__ = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ = decoder_layers
SCREAMING_SNAKE_CASE__ = decoder_attention_heads
SCREAMING_SNAKE_CASE__ = dropout
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = activation_dropout
SCREAMING_SNAKE_CASE__ = activation_function
SCREAMING_SNAKE_CASE__ = init_std
SCREAMING_SNAKE_CASE__ = encoder_layerdrop
SCREAMING_SNAKE_CASE__ = decoder_layerdrop
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = encoder_layers
SCREAMING_SNAKE_CASE__ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , forced_eos_token_id=__A , **__A , )
@property
def _snake_case ( self :int ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def _snake_case ( self :str ) -> int:
"""simple docstring"""
return self.d_model | 6 |
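# --- Minimal sketch (added) of the attribute_map indirection used above:
# reading config.num_attention_heads is forwarded to encoder_attention_heads.
# Toy class with assumed values, not the PretrainedConfig machinery itself.
class ConfigWithAliases:
    attribute_map = {"num_attention_heads": "encoder_attention_heads"}

    def __init__(self):
        self.encoder_attention_heads = 16

    def __getattr__(self, name):
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

assert ConfigWithAliases().num_attention_heads == 16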
import argparse
import datetime
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = {
"""0""": """Sunday""",
"""1""": """Monday""",
"""2""": """Tuesday""",
"""3""": """Wednesday""",
"""4""": """Thursday""",
"""5""": """Friday""",
"""6""": """Saturday""",
}
SCREAMING_SNAKE_CASE__ = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if len(UpperCamelCase__ ) != 10:
raise ValueError("""Must be 10 characters long""" )
# Get month
SCREAMING_SNAKE_CASE__ = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("""Month must be between 1 - 12""" )
SCREAMING_SNAKE_CASE__ = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get day
SCREAMING_SNAKE_CASE__ = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("""Date must be between 1 - 31""" )
# Get second separator
SCREAMING_SNAKE_CASE__ = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get year
SCREAMING_SNAKE_CASE__ = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8_500:
raise ValueError(
"""Year out of range. There has to be some sort of limit...right?""" )
# Get datetime obj for validation
SCREAMING_SNAKE_CASE__ = datetime.date(int(UpperCamelCase__ ) , int(UpperCamelCase__ ) , int(UpperCamelCase__ ) )
# Start math
if m <= 2:
SCREAMING_SNAKE_CASE__ = y - 1
SCREAMING_SNAKE_CASE__ = m + 12
# maths var
SCREAMING_SNAKE_CASE__ = int(str(UpperCamelCase__ )[:2] )
SCREAMING_SNAKE_CASE__ = int(str(UpperCamelCase__ )[2:] )
SCREAMING_SNAKE_CASE__ = int(2.6 * m - 5.3_9 )
SCREAMING_SNAKE_CASE__ = int(c / 4 )
SCREAMING_SNAKE_CASE__ = int(k / 4 )
SCREAMING_SNAKE_CASE__ = int(d + k )
SCREAMING_SNAKE_CASE__ = int(t + u + v + x )
SCREAMING_SNAKE_CASE__ = int(z - (2 * c) )
SCREAMING_SNAKE_CASE__ = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
# Response
SCREAMING_SNAKE_CASE__ = f'''Your date {date_input}, is a {days[str(UpperCamelCase__ )]}!'''
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
_lowerCamelCase = parser.parse_args()
zeller(args.date_input) | 6 | 1 |
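# --- Cross-check (added): Zeller's congruence written out directly and compared
# with datetime, assuming Gregorian dates. h == 0 means Saturday in Zeller's
# numbering, while datetime.weekday() uses 5 for Saturday.
import datetime as _dt

def zeller_weekday(m: int, d: int, y: int) -> int:
    if m <= 2:
        m, y = m + 12, y - 1
    k, j = y % 100, y // 100
    return (d + (13 * (m + 1)) // 5 + k + k // 4 + j // 4 + 5 * j) % 7

assert zeller_weekday(1, 1, 2000) == 0        # Zeller: 0 == Saturday
assert _dt.date(2000, 1, 1).weekday() == 5    # datetime: 5 == Saturday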
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_lowerCamelCase = None
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
_lowerCamelCase = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
_lowerCamelCase = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = ["input_ids", "attention_mask"]
lowerCamelCase_ = MBartTokenizer
lowerCamelCase_ = []
lowerCamelCase_ = []
def __init__( self :int , __A :Optional[Any]=None , __A :Union[str, Any]=None , __A :Optional[Any]="<s>" , __A :int="</s>" , __A :Tuple="</s>" , __A :int="<s>" , __A :str="<unk>" , __A :str="<pad>" , __A :Dict="<mask>" , __A :Optional[Any]=None , __A :int=None , __A :Tuple=None , **__A :Optional[int] , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
vocab_file=__A , tokenizer_file=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , src_lang=__A , tgt_lang=__A , additional_special_tokens=__A , **__A , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
SCREAMING_SNAKE_CASE__ = {
lang_code: self.convert_tokens_to_ids(__A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
SCREAMING_SNAKE_CASE__ = src_lang if src_lang is not None else """en_XX"""
SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(self._src_lang )
SCREAMING_SNAKE_CASE__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _snake_case ( self :Union[str, Any] ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self :Tuple , __A :str ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self :Any , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self :Any , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self :List[str] , __A :Dict , __A :str , __A :Optional[str] , __A :Optional[str] , **__A :int ) -> Dict:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
SCREAMING_SNAKE_CASE__ = src_lang
SCREAMING_SNAKE_CASE__ = self(__A , add_special_tokens=__A , return_tensors=__A , **__A )
SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(__A )
SCREAMING_SNAKE_CASE__ = tgt_lang_id
return inputs
def _snake_case ( self :Optional[int] , __A :List[str] , __A :str = "en_XX" , __A :Optional[List[str]] = None , __A :str = "ro_RO" , **__A :Optional[int] , ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = src_lang
SCREAMING_SNAKE_CASE__ = tgt_lang
return super().prepare_seqaseq_batch(__A , __A , **__A )
def _snake_case ( self :List[str] ) -> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self :Optional[int] ) -> List[Any]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self :Any , __A :Tuple ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(__A )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = [self.eos_token_id, self.cur_lang_code]
SCREAMING_SNAKE_CASE__ = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self :Any , __A :str ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(__A )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = [self.eos_token_id, self.cur_lang_code]
SCREAMING_SNAKE_CASE__ = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self :str , __A :str , __A :Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file , __A )
return (out_vocab_file,) | 6 |
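# --- Illustrative note (added): set_src_lang_special_tokens above configures an
# empty prefix and an [eos, lang_code] suffix, so an encoded source sequence
# ends with those two ids. The ids below are made up for the sketch.
eos_id, en_xx_id = 2, 250_004                   # assumed ids, not read from a real vocab
token_ids = [47, 11, 93]
src_ids = [] + token_ids + [eos_id, en_xx_id]   # prefix_tokens + ids + suffix_tokens
assert src_ids[-2:] == [eos_id, en_xx_id]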
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_lowerCamelCase = logging.getLogger(__name__)
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30522, type=int)
_lowerCamelCase = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, 'rb') as fp:
_lowerCamelCase = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
_lowerCamelCase = Counter()
for tk_ids in data:
counter.update(tk_ids)
_lowerCamelCase = [0] * args.vocab_size
for k, v in counter.items():
_lowerCamelCase = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 6 | 1 |
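# --- Toy run (added) of the counting loop above on made-up data, to show the
# shape of the output: a dense list indexed by token id.
from collections import Counter

toy_data = [[5, 7, 5], [7, 9]]
toy_counter = Counter()
for tk_ids in toy_data:
    toy_counter.update(tk_ids)
toy_counts = [0] * 10
for k, v in toy_counter.items():
    toy_counts[k] = v
assert toy_counts == [0, 0, 0, 0, 0, 2, 0, 2, 0, 1]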
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "marian"
lowerCamelCase_ = ["past_key_values"]
lowerCamelCase_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self :Tuple , __A :Union[str, Any]=5_8101 , __A :Dict=None , __A :str=1024 , __A :Optional[int]=12 , __A :Optional[int]=4096 , __A :Union[str, Any]=16 , __A :List[Any]=12 , __A :int=4096 , __A :Dict=16 , __A :Optional[Any]=0.0 , __A :Any=0.0 , __A :str=True , __A :int=True , __A :Union[str, Any]="gelu" , __A :str=1024 , __A :List[Any]=0.1 , __A :Tuple=0.0 , __A :str=0.0 , __A :Dict=0.0_2 , __A :Tuple=5_8100 , __A :Optional[Any]=False , __A :Dict=5_8100 , __A :int=0 , __A :str=0 , __A :Union[str, Any]=True , **__A :Dict , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = decoder_vocab_size or vocab_size
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = d_model
SCREAMING_SNAKE_CASE__ = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ = encoder_layers
SCREAMING_SNAKE_CASE__ = encoder_attention_heads
SCREAMING_SNAKE_CASE__ = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ = decoder_layers
SCREAMING_SNAKE_CASE__ = decoder_attention_heads
SCREAMING_SNAKE_CASE__ = dropout
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = activation_dropout
SCREAMING_SNAKE_CASE__ = activation_function
SCREAMING_SNAKE_CASE__ = init_std
SCREAMING_SNAKE_CASE__ = encoder_layerdrop
SCREAMING_SNAKE_CASE__ = decoder_layerdrop
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = encoder_layers
SCREAMING_SNAKE_CASE__ = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , forced_eos_token_id=__A , **__A , )
class UpperCamelCase_ ( UpperCamelCase__ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def _snake_case ( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ = {0: """batch"""}
SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """decoder_sequence"""}
SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__A , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.num_layers
for i in range(__A ):
SCREAMING_SNAKE_CASE__ = {0: """batch""", 2: """past_sequence + sequence"""}
SCREAMING_SNAKE_CASE__ = {0: """batch""", 2: """past_sequence + sequence"""}
else:
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def _snake_case ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ = super().outputs
else:
SCREAMING_SNAKE_CASE__ = super(__A , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.num_layers
for i in range(__A ):
SCREAMING_SNAKE_CASE__ = {0: """batch""", 2: """past_sequence + sequence"""}
SCREAMING_SNAKE_CASE__ = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _snake_case ( self :List[Any] , __A :PreTrainedTokenizer , __A :int = -1 , __A :int = -1 , __A :bool = False , __A :Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._generate_dummy_inputs_for_encoder_and_decoder(
__A , __A , __A , __A , __A )
# Generate decoder inputs
SCREAMING_SNAKE_CASE__ = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE__ = self._generate_dummy_inputs_for_encoder_and_decoder(
__A , __A , __A , __A , __A )
SCREAMING_SNAKE_CASE__ = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE__ = dict(**__A , **__A )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = common_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE__ = common_inputs["""decoder_input_ids"""].shape[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.num_attention_heads
SCREAMING_SNAKE_CASE__ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ = decoder_seq_length + 3
SCREAMING_SNAKE_CASE__ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__A , __A )] , dim=1 )
SCREAMING_SNAKE_CASE__ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.num_layers
SCREAMING_SNAKE_CASE__ = min(__A , __A )
SCREAMING_SNAKE_CASE__ = max(__A , __A ) - min_num_layers
SCREAMING_SNAKE_CASE__ = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE__ = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__A , __A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
def _snake_case ( self :Dict , __A :PreTrainedTokenizer , __A :int = -1 , __A :int = -1 , __A :bool = False , __A :Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._generate_dummy_inputs_for_encoder_and_decoder(
__A , __A , __A , __A , __A )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ = seqlen + 2
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.num_layers
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.num_attention_heads
SCREAMING_SNAKE_CASE__ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ = common_inputs["""attention_mask"""].dtype
SCREAMING_SNAKE_CASE__ = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__A , __A , dtype=__A )] , dim=1 )
SCREAMING_SNAKE_CASE__ = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def _snake_case ( self :int , __A :PreTrainedTokenizer , __A :int = -1 , __A :int = -1 , __A :bool = False , __A :Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = compute_effective_axis_dimension(
__A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ = tokenizer.num_special_tokens_to_add(__A )
SCREAMING_SNAKE_CASE__ = compute_effective_axis_dimension(
__A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE__ = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE__ = dict(tokenizer(__A , return_tensors=__A ) )
return common_inputs
def _snake_case ( self :Optional[Any] , __A :PreTrainedTokenizer , __A :int = -1 , __A :int = -1 , __A :bool = False , __A :Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
else:
SCREAMING_SNAKE_CASE__ = self._generate_dummy_inputs_for_causal_lm(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
return common_inputs
def _snake_case ( self :Union[str, Any] , __A :List[Any] , __A :Tuple , __A :Dict , __A :Any ) -> str:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ = super()._flatten_past_key_values_(__A , __A , __A , __A )
else:
SCREAMING_SNAKE_CASE__ = super(__A , self )._flatten_past_key_values_(
__A , __A , __A , __A )
@property
def _snake_case ( self :Tuple ) -> float:
"""simple docstring"""
return 1E-4 | 6 |
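# --- Shape sketch (added) of one past_key_values entry the dummy-input code
# above zero-fills: (batch, num_heads, past_seq_len, head_dim), stored as a
# (key, value) pair per layer. Toy sizes assumed.
import torch

batch, heads, past_len, head_dim = 2, 8, 5, 64
layer_past = (
    torch.zeros(batch, heads, past_len, head_dim),   # past keys
    torch.zeros(batch, heads, past_len, head_dim),   # past values
)
assert layer_past[0].shape == (2, 8, 5, 64)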
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCamelCase = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 1 |
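# --- Hedged sketch (added) of the lazy-import pattern _LazyModule implements:
# attribute access triggers the real submodule import, keeping the top-level
# package import cheap. This is a simplified stand-in, not the actual class.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # __dict__.get avoids recursion if the structure is not yet set
        structure = self.__dict__.get("_import_structure", {})
        for submodule, names in structure.items():
            if attr in names:
                module = importlib.import_module(submodule)
                return getattr(module, attr)
        raise AttributeError(attr)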
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = b.T
SCREAMING_SNAKE_CASE__ = np.sum(np.square(UpperCamelCase__ ) , axis=1 )
SCREAMING_SNAKE_CASE__ = np.sum(np.square(UpperCamelCase__ ) , axis=0 )
SCREAMING_SNAKE_CASE__ = np.matmul(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = aa[:, None] - 2 * ab + ba[None, :]
return d
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = x.reshape(-1 , 3 )
SCREAMING_SNAKE_CASE__ = squared_euclidean_distance(UpperCamelCase__ , UpperCamelCase__ )
return np.argmin(UpperCamelCase__ , axis=1 )
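# --- Numeric check (added) that the expansion used above,
# ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, matches a direct computation,
# followed by the argmin that picks the nearest palette color per pixel.
import numpy as np

a = np.random.rand(4, 3)
b = np.random.rand(5, 3)
direct = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
expanded = (a**2).sum(1)[:, None] - 2 * a @ b.T + (b**2).sum(1)[None, :]
assert np.allclose(direct, expanded)
nearest = expanded.argmin(axis=1)   # index of the closest row of b for each row of a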
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["pixel_values"]
def __init__( self :int , __A :Optional[Union[List[List[int]], np.ndarray]] = None , __A :bool = True , __A :Dict[str, int] = None , __A :PILImageResampling = PILImageResampling.BILINEAR , __A :bool = True , __A :bool = True , **__A :int , ) -> None:
"""simple docstring"""
super().__init__(**__A )
SCREAMING_SNAKE_CASE__ = size if size is not None else {"""height""": 256, """width""": 256}
SCREAMING_SNAKE_CASE__ = get_size_dict(__A )
SCREAMING_SNAKE_CASE__ = np.array(__A ) if clusters is not None else None
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = do_color_quantize
def _snake_case ( self :str , __A :np.ndarray , __A :Dict[str, int] , __A :PILImageResampling = PILImageResampling.BILINEAR , __A :Optional[Union[str, ChannelDimension]] = None , **__A :List[str] , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
__A , size=(size["""height"""], size["""width"""]) , resample=__A , data_format=__A , **__A )
def _snake_case ( self :List[Any] , __A :np.ndarray , __A :Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = rescale(image=__A , scale=1 / 1_2_7.5 , data_format=__A )
SCREAMING_SNAKE_CASE__ = image - 1
return image
def _snake_case ( self :Optional[int] , __A :ImageInput , __A :bool = None , __A :Dict[str, int] = None , __A :PILImageResampling = None , __A :bool = None , __A :Optional[bool] = None , __A :Optional[Union[List[List[int]], np.ndarray]] = None , __A :Optional[Union[str, TensorType]] = None , __A :Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **__A :List[str] , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(__A )
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
SCREAMING_SNAKE_CASE__ = clusters if clusters is not None else self.clusters
SCREAMING_SNAKE_CASE__ = np.array(__A )
SCREAMING_SNAKE_CASE__ = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(__A ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ = [self.normalize(image=__A ) for image in images]
if do_color_quantize:
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(__A , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
SCREAMING_SNAKE_CASE__ = np.array(__A )
SCREAMING_SNAKE_CASE__ = color_quantize(__A , __A ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
SCREAMING_SNAKE_CASE__ = images.shape[0]
SCREAMING_SNAKE_CASE__ = images.reshape(__A , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
SCREAMING_SNAKE_CASE__ = list(__A )
else:
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(__A , __A ) for image in images]
SCREAMING_SNAKE_CASE__ = {"""input_ids""": images}
return BatchFeature(data=__A , tensor_type=__A ) | 6 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "OwlViTImageProcessor"
lowerCamelCase_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self :Optional[Any] , __A :int=None , __A :Optional[int]=None , **__A :str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __A , )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" )
SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__A , __A )
def __call__( self :str , __A :Dict=None , __A :List[str]=None , __A :str=None , __A :Optional[int]="max_length" , __A :Tuple="np" , **__A :int ) -> Tuple:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )):
SCREAMING_SNAKE_CASE__ = [self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )]
elif isinstance(__A , __A ) and isinstance(text[0] , __A ):
SCREAMING_SNAKE_CASE__ = []
# Maximum number of queries across batch
SCREAMING_SNAKE_CASE__ = max([len(__A ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__A ) != max_num_queries:
SCREAMING_SNAKE_CASE__ = t + [""" """] * (max_num_queries - len(__A ))
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )
encodings.append(__A )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
SCREAMING_SNAKE_CASE__ = BatchEncoding()
SCREAMING_SNAKE_CASE__ = input_ids
SCREAMING_SNAKE_CASE__ = attention_mask
if query_images is not None:
SCREAMING_SNAKE_CASE__ = BatchEncoding()
SCREAMING_SNAKE_CASE__ = self.image_processor(
__A , return_tensors=__A , **__A ).pixel_values
SCREAMING_SNAKE_CASE__ = query_pixel_values
if images is not None:
SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def _snake_case ( self :List[Any] , *__A :Dict , **__A :Dict ) -> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process(*__A , **__A )
def _snake_case ( self :Optional[int] , *__A :Dict , **__A :List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*__A , **__A )
def _snake_case ( self :str , *__A :List[str] , **__A :Union[str, Any] ) -> Any:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*__A , **__A )
def _snake_case ( self :Dict , *__A :List[str] , **__A :List[str] ) -> int:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :Dict , *__A :Dict , **__A :List[str] ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , )
return self.image_processor_class
@property
def _snake_case ( self :Any ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , )
return self.image_processor | 6 | 1 |
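# --- Toy illustration (added) of the per-batch query padding in __call__ above:
# every sample's text queries are padded with " " up to the batch maximum.
text = [["cat"], ["dog", "remote"]]
max_num_queries = max(len(t) for t in text)
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
assert padded == [["cat", " "], ["dog", "remote"]]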
import json
import sys
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int] ):
with open(UpperCamelCase__ , encoding="""utf-8""" ) as f:
SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = results[benchmark_name]
SCREAMING_SNAKE_CASE__ = benchmark_name.split("""/""" )[-1]
output_md.append(f'''### Benchmark: {benchmark_file_name}''' )
SCREAMING_SNAKE_CASE__ = """| metric |"""
SCREAMING_SNAKE_CASE__ = """|--------|"""
SCREAMING_SNAKE_CASE__ = """| new / old (diff) |"""
for metric_name in sorted(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = benchmark_res[metric_name]
SCREAMING_SNAKE_CASE__ = metric_vals["""new"""]
SCREAMING_SNAKE_CASE__ = metric_vals.get("""old""" , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = metric_vals.get("""diff""" , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = f''' {new_val:f}''' if isinstance(UpperCamelCase__ , (int, float) ) else """None"""
if old_val is not None:
val_str += f''' / {old_val:f}''' if isinstance(UpperCamelCase__ , (int, float) ) else "None"
if dif_val is not None:
val_str += f''' ({dif_val:f})''' if isinstance(UpperCamelCase__ , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(UpperCamelCase__ ) )
if __name__ == "__main__":
_lowerCamelCase = sys.argv[1]
_lowerCamelCase = sys.argv[2]
format_json_to_md(input_json_file, output_md_file) | 6 |
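# --- Toy run (added) of the row assembly above on a made-up benchmark entry,
# showing the three markdown lines built per table.
toy_res = {"load_time": {"new": 1.5, "old": 2.0, "diff": -0.5}}
title, lines, value = "| metric |", "|--------|", "| new / old (diff) |"
for metric_name in sorted(toy_res):
    vals = toy_res[metric_name]
    title += " " + metric_name + " |"
    lines += "---|"
    value += f" {vals['new']:f} / {vals['old']:f} ({vals['diff']:f}) |"
# title -> '| metric | load_time |'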
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = 42
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ):
@register_to_config
def __init__( self :Union[str, Any] , __A :int = 3 , __A :int = 3 , __A :Tuple[str] = ("DownEncoderBlock2D",) , __A :Tuple[str] = ("UpDecoderBlock2D",) , __A :Tuple[int] = (64,) , __A :int = 1 , __A :str = "silu" , __A :int = 3 , __A :int = 32 , __A :int = 256 , __A :int = 32 , __A :Optional[int] = None , __A :float = 0.1_8_2_1_5 , __A :str = "group" , ) -> Any:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
SCREAMING_SNAKE_CASE__ = Encoder(
in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , )
SCREAMING_SNAKE_CASE__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 )
SCREAMING_SNAKE_CASE__ = VectorQuantizer(__A , __A , beta=0.2_5 , remap=__A , sane_index_shape=__A )
SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 )
# pass init params to Decoder
SCREAMING_SNAKE_CASE__ = Decoder(
in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , )
@apply_forward_hook
def _snake_case ( self :Union[str, Any] , __A :torch.FloatTensor , __A :bool = True ) -> VQEncoderOutput:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.encoder(__A )
SCREAMING_SNAKE_CASE__ = self.quant_conv(__A )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__A )
@apply_forward_hook
def _snake_case ( self :Tuple , __A :torch.FloatTensor , __A :bool = False , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if not force_not_quantize:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.quantize(__A )
else:
SCREAMING_SNAKE_CASE__ = h
SCREAMING_SNAKE_CASE__ = self.post_quant_conv(__A )
SCREAMING_SNAKE_CASE__ = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
def _snake_case ( self :int , __A :torch.FloatTensor , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = sample
SCREAMING_SNAKE_CASE__ = self.encode(__A ).latents
SCREAMING_SNAKE_CASE__ = self.decode(__A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A ) | 6 | 1 |
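# --- Minimal sketch (added, PyTorch) of the nearest-codebook lookup a
# VectorQuantizer like the one above performs on the encoder output.
import torch

codebook = torch.randn(16, 4)                       # 16 codes of dimension 4
z = torch.randn(2, 4)                               # 2 latent vectors to quantize
indices = torch.cdist(z, codebook).argmin(dim=1)    # nearest code per latent
z_q = codebook[indices]                             # quantized latents, shape (2, 4)
assert z_q.shape == (2, 4)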
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "AutoImageProcessor"
lowerCamelCase_ = "AutoTokenizer"
def __init__( self :Optional[int] , __A :Optional[Any] , __A :Dict ) -> Dict:
"""simple docstring"""
super().__init__(__A , __A )
SCREAMING_SNAKE_CASE__ = self.image_processor
def __call__( self :int , __A :str=None , __A :int=None , __A :Union[str, Any]=None , **__A :str ) -> Optional[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def _snake_case ( self :str , *__A :List[str] , **__A :List[str] ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :List[str] , *__A :Any , **__A :Any ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self :Dict ) -> List[Any]:
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"] | 6 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = 42
lowerCamelCase_ = jnp.floataa
lowerCamelCase_ = True
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE__ = nn.Dense(5 , dtype=self.dtype )
def __call__( self :List[Any] , *__A :int , **__A :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = super().__call__(*__A , **__A )
SCREAMING_SNAKE_CASE__ = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = FlaxBigBirdForNaturalQuestionsModule
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ):
def cross_entropy(UpperCamelCase__: List[str] , UpperCamelCase__: List[str] , UpperCamelCase__: List[str]=None ):
SCREAMING_SNAKE_CASE__ = logits.shape[-1]
SCREAMING_SNAKE_CASE__ = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype("""f4""" )
SCREAMING_SNAKE_CASE__ = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 )
SCREAMING_SNAKE_CASE__ = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
SCREAMING_SNAKE_CASE__ = reduction(UpperCamelCase__ )
return loss
SCREAMING_SNAKE_CASE__ = partial(UpperCamelCase__ , reduction=jnp.mean )
SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
return (start_loss + end_loss + pooled_loss) / 3
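# --- Numeric check (added) of the one-hot cross-entropy defined above:
# for logits [2, 0, 0] and label 0, the loss is -log(e^2 / (e^2 + 2)) ~ 0.2395.
import jax
import jax.numpy as jnp

toy_logits = jnp.array([[2.0, 0.0, 0.0]])
toy_labels = jnp.array([0])
one_hot = (toy_labels[..., None] == jnp.arange(3)[None]).astype("f4")
toy_loss = -jnp.sum(one_hot * jax.nn.log_softmax(toy_logits, axis=-1), axis=-1)
assert abs(float(toy_loss[0]) - 0.2395) < 1e-3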
@dataclass
class UpperCamelCase_ :
lowerCamelCase_ = "google/bigbird-roberta-base"
lowerCamelCase_ = 30_00
lowerCamelCase_ = 1_05_00
lowerCamelCase_ = 1_28
lowerCamelCase_ = 3
lowerCamelCase_ = 1
lowerCamelCase_ = 5
# tx_args
lowerCamelCase_ = 3e-5
lowerCamelCase_ = 0.0
lowerCamelCase_ = 2_00_00
lowerCamelCase_ = 0.0095
lowerCamelCase_ = "bigbird-roberta-natural-questions"
lowerCamelCase_ = "training-expt"
lowerCamelCase_ = "data/nq-training.jsonl"
lowerCamelCase_ = "data/nq-validation.jsonl"
def _snake_case ( self :str ) -> Optional[int]:
"""simple docstring"""
os.makedirs(self.base_dir , exist_ok=__A )
SCREAMING_SNAKE_CASE__ = os.path.join(self.base_dir , self.save_dir )
SCREAMING_SNAKE_CASE__ = self.batch_size_per_device * jax.device_count()
@dataclass
class UpperCamelCase_ :
lowerCamelCase_ = 42
lowerCamelCase_ = 40_96 # no dynamic padding on TPUs
def __call__( self :Optional[Any] , __A :Optional[int] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.collate_fn(__A )
SCREAMING_SNAKE_CASE__ = jax.tree_util.tree_map(__A , __A )
return batch
def _snake_case ( self :List[Any] , __A :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.fetch_inputs(features["""input_ids"""] )
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": jnp.array(__A , dtype=jnp.intaa ),
"""attention_mask""": jnp.array(__A , dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ),
}
return batch
def _snake_case ( self :Tuple , __A :list ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self._fetch_inputs(__A ) for ids in input_ids]
return zip(*__A )
def _snake_case ( self :List[str] , __A :list ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [1 for _ in range(len(__A ) )]
while len(__A ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any]=None ):
if seed is not None:
SCREAMING_SNAKE_CASE__ = dataset.shuffle(seed=UpperCamelCase__ )
for i in range(len(UpperCamelCase__ ) // batch_size ):
SCREAMING_SNAKE_CASE__ = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(UpperCamelCase__ )
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)  # static, not a pytree leaf
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    model_save_fn: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    logger: wandb  # a wandb run object, imported/initialized earlier in the script
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,  # the NQ loss whose tail appears at the top of this file (name assumed)
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
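# Quick numeric sanity check of the schedule above (a sketch, not from the
# original script): lr ramps linearly from init_lr to lr over warmup_steps,
# then decays linearly toward 1e-7.
_sched = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=200)
print(float(_sched(0)), float(_sched(100)), float(_sched(200)))  # 0.0, ~3e-5, ~1e-7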
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # decay everything except biases and LayerNorm scales (test the *key path*, not the value)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
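# Minimal, self-contained sketch of the pmap pattern used by train_step/val_step
# above: per-device gradients are averaged with jax.lax.pmean over the "batch"
# axis. Everything here (toy_train_step, the toy params) is illustrative, not
# from the original script; it also runs on a single CPU device.
import jax
import jax.numpy as jnp
from functools import partial


@partial(jax.pmap, axis_name="batch")
def toy_train_step(params, x, y):
    def loss_fn(p):
        pred = x @ p["w"] + p["b"]
        return jnp.mean((pred - y) ** 2)

    loss, grads = jax.value_and_grad(loss_fn)(params)
    # average loss and gradients across devices, mirroring the script above
    loss = jax.lax.pmean(loss, axis_name="batch")
    grads = jax.lax.pmean(grads, axis_name="batch")
    new_params = jax.tree_util.tree_map(lambda p, g: p - 0.01 * g, params, grads)
    return new_params, loss


n = jax.local_device_count()
params = {"w": jnp.ones((n, 4, 1)), "b": jnp.zeros((n, 1))}
x, y = jnp.ones((n, 8, 4)), jnp.zeros((n, 8, 1))
params, loss = toy_train_step(params, x, y)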
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
def _snake_case ( self :int , **__A :Any ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {"""num_train_timesteps""": 1000}
config.update(**__A )
return config
def _snake_case ( self :Union[str, Any] , __A :int=0 , **__A :str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""num_inference_steps""" , __A )
SCREAMING_SNAKE_CASE__ = self.dummy_sample
SCREAMING_SNAKE_CASE__ = 0.1 * sample
SCREAMING_SNAKE_CASE__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(**__A )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__A )
scheduler.set_timesteps(__A )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE__ = dummy_past_residuals[:]
if time_step is None:
SCREAMING_SNAKE_CASE__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__A )
SCREAMING_SNAKE_CASE__ = scheduler_class.from_pretrained(__A )
new_scheduler.set_timesteps(__A )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE__ = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE__ = scheduler.step(__A , __A , __A , **__A ).prev_sample
SCREAMING_SNAKE_CASE__ = new_scheduler.step(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE__ = scheduler.step(__A , __A , __A , **__A ).prev_sample
SCREAMING_SNAKE_CASE__ = new_scheduler.step(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self :List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _snake_case ( self :Optional[Any] , __A :Union[str, Any]=0 , **__A :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""num_inference_steps""" , __A )
SCREAMING_SNAKE_CASE__ = self.dummy_sample
SCREAMING_SNAKE_CASE__ = 0.1 * sample
SCREAMING_SNAKE_CASE__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__A )
scheduler.set_timesteps(__A )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE__ = dummy_past_residuals[:]
if time_step is None:
SCREAMING_SNAKE_CASE__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__A )
SCREAMING_SNAKE_CASE__ = scheduler_class.from_pretrained(__A )
# copy over dummy past residuals
new_scheduler.set_timesteps(__A )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE__ = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE__ = scheduler.step(__A , __A , __A , **__A ).prev_sample
SCREAMING_SNAKE_CASE__ = new_scheduler.step(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE__ = scheduler.step(__A , __A , __A , **__A ).prev_sample
SCREAMING_SNAKE_CASE__ = new_scheduler.step(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self :List[Any] , **__A :int ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(**__A )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__A )
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
scheduler.set_timesteps(__A )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ = model(__A , __A )
SCREAMING_SNAKE_CASE__ = scheduler.step(__A , __A , __A ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ = model(__A , __A )
SCREAMING_SNAKE_CASE__ = scheduler.step(__A , __A , __A ).prev_sample
return sample
def _snake_case ( self :Optional[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""num_inference_steps""" , __A )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__A )
SCREAMING_SNAKE_CASE__ = self.dummy_sample
SCREAMING_SNAKE_CASE__ = 0.1 * sample
if num_inference_steps is not None and hasattr(__A , """set_timesteps""" ):
scheduler.set_timesteps(__A )
elif num_inference_steps is not None and not hasattr(__A , """set_timesteps""" ):
SCREAMING_SNAKE_CASE__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
SCREAMING_SNAKE_CASE__ = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE__ = scheduler.timesteps[5]
SCREAMING_SNAKE_CASE__ = scheduler.timesteps[6]
SCREAMING_SNAKE_CASE__ = scheduler.step(__A , __A , __A , **__A ).prev_sample
SCREAMING_SNAKE_CASE__ = scheduler.step(__A , __A , __A , **__A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
SCREAMING_SNAKE_CASE__ = scheduler.step(__A , __A , __A , **__A ).prev_sample
SCREAMING_SNAKE_CASE__ = scheduler.step(__A , __A , __A , **__A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self :List[str] ) -> Optional[int]:
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__A , time_step=__A )
def _snake_case ( self :Optional[Any] ) -> Tuple:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__A , time_step=__A )
def _snake_case ( self :Optional[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.full_loop()
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__A ) )
assert abs(result_mean.item() - 2540529) < 10
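# Hedged usage sketch of the scheduler exercised above: the "denoiser" is a
# placeholder standing in for a real model; everything else is the public
# diffusers API the full_loop helper drives.
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = 0.1 * sample  # placeholder "model output"
    sample = scheduler.step(residual, t, sample).prev_sample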
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
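# Example use of the helper above:
import torch

act = get_activation("gelu")
print(act(torch.zeros(2)))  # tensor([0., 0.])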
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
def __init__( self :Any , __A :int=True , __A :List[str]=None , __A :Tuple=3 , __A :Tuple=100 , __A :Union[str, Any]=6 , __A :Tuple=2048 , __A :List[str]=8 , __A :str=6 , __A :List[Any]=2048 , __A :Dict=8 , __A :str=0.0 , __A :List[str]=0.0 , __A :Tuple=True , __A :Tuple="relu" , __A :Dict=256 , __A :str=0.1 , __A :str=0.0 , __A :int=0.0 , __A :Union[str, Any]=0.0_2 , __A :List[str]=1.0 , __A :List[Any]=False , __A :List[Any]="sine" , __A :Dict="resnet50" , __A :str=True , __A :str=False , __A :Tuple=1 , __A :str=5 , __A :Optional[int]=2 , __A :Union[str, Any]=1 , __A :Union[str, Any]=1 , __A :Union[str, Any]=5 , __A :List[Any]=2 , __A :Tuple=0.1 , **__A :str , ) -> Union[str, Any]:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
SCREAMING_SNAKE_CASE__ = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__A , __A ):
SCREAMING_SNAKE_CASE__ = backbone_config.get("""model_type""" )
SCREAMING_SNAKE_CASE__ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE__ = config_class.from_dict(__A )
# set timm attributes to None
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None, None, None
SCREAMING_SNAKE_CASE__ = use_timm_backbone
SCREAMING_SNAKE_CASE__ = backbone_config
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = num_queries
SCREAMING_SNAKE_CASE__ = d_model
SCREAMING_SNAKE_CASE__ = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ = encoder_layers
SCREAMING_SNAKE_CASE__ = encoder_attention_heads
SCREAMING_SNAKE_CASE__ = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ = decoder_layers
SCREAMING_SNAKE_CASE__ = decoder_attention_heads
SCREAMING_SNAKE_CASE__ = dropout
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = activation_dropout
SCREAMING_SNAKE_CASE__ = activation_function
SCREAMING_SNAKE_CASE__ = init_std
SCREAMING_SNAKE_CASE__ = init_xavier_std
SCREAMING_SNAKE_CASE__ = encoder_layerdrop
SCREAMING_SNAKE_CASE__ = decoder_layerdrop
SCREAMING_SNAKE_CASE__ = encoder_layers
SCREAMING_SNAKE_CASE__ = auxiliary_loss
SCREAMING_SNAKE_CASE__ = position_embedding_type
SCREAMING_SNAKE_CASE__ = backbone
SCREAMING_SNAKE_CASE__ = use_pretrained_backbone
SCREAMING_SNAKE_CASE__ = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE__ = class_cost
SCREAMING_SNAKE_CASE__ = bbox_cost
SCREAMING_SNAKE_CASE__ = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ = mask_loss_coefficient
SCREAMING_SNAKE_CASE__ = dice_loss_coefficient
SCREAMING_SNAKE_CASE__ = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def _snake_case ( self :Tuple ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def _snake_case ( self :Optional[Any] ) -> int:
"""simple docstring"""
return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
def _snake_case ( self :Optional[int] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self :int ) -> float:
"""simple docstring"""
return 1E-5
@property
def _snake_case ( self :Optional[int] ) -> int:
"""simple docstring"""
return 12
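# Hedged usage sketch: the class above mirrors transformers' TableTransformerConfig,
# whose attribute_map routes hidden_size -> d_model and num_attention_heads ->
# encoder_attention_heads (the values below are the __init__ defaults).
from transformers import TableTransformerConfig

config = TableTransformerConfig()
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8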
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] ):
SCREAMING_SNAKE_CASE__ = dct.pop(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = val
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE__ = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE__ = in_proj_bias[-dim :]
# fmt: on
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size]
SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2]
SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size]
SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2]
SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :]
# fmt: on
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: bool = False ):
SCREAMING_SNAKE_CASE__ = get_maskformer_config(UpperCamelCase__ )
# load original state_dict
with open(UpperCamelCase__ , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ = pickle.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
SCREAMING_SNAKE_CASE__ = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config )
read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# update to torch tensors
for key, value in state_dict.items():
SCREAMING_SNAKE_CASE__ = torch.from_numpy(UpperCamelCase__ )
# load 🤗 model
SCREAMING_SNAKE_CASE__ = MaskFormerForInstanceSegmentation(UpperCamelCase__ )
model.eval()
for name, param in model.named_parameters():
print(UpperCamelCase__ , param.shape )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(UpperCamelCase__ ) == 0, f'''Unexpected keys: {unexpected_keys}'''
# verify results
SCREAMING_SNAKE_CASE__ = prepare_img()
if "vistas" in model_name:
SCREAMING_SNAKE_CASE__ = 65
elif "cityscapes" in model_name:
SCREAMING_SNAKE_CASE__ = 65_535
else:
SCREAMING_SNAKE_CASE__ = 255
SCREAMING_SNAKE_CASE__ = True if """ade""" in model_name else False
SCREAMING_SNAKE_CASE__ = MaskFormerImageProcessor(ignore_index=UpperCamelCase__ , reduce_labels=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = image_processor(UpperCamelCase__ , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ = model(**UpperCamelCase__ )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
image_processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(f'''nielsr/{model_name}''' )
image_processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert',
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_lowerCamelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
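# Minimal sketch of the fused-QKV split performed by read_in_swin_q_k_v /
# read_in_decoder_q_k_v above: original checkpoints store one (3*dim, dim)
# projection; the HF model expects separate q/k/v tensors. Shapes illustrative.
import torch

dim = 4
in_proj_weight = torch.randn(3 * dim, dim)
q_w = in_proj_weight[:dim, :]
k_w = in_proj_weight[dim : dim * 2, :]
v_w = in_proj_weight[-dim:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)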
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
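# Example: 12 = 4 + 4 + 4, so three squares suffice.
assert minimum_squares_to_represent_a_number(12) == 3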
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
_lowerCamelCase = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
_lowerCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )
# Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
@property
def _snake_case ( self :Optional[Any] ) -> int:
"""simple docstring"""
return self._unicode_vocab_size
def _snake_case ( self :Tuple , __A :str ) -> List[str]:
"""simple docstring"""
return list(__A )
def _snake_case ( self :Optional[Any] , __A :str ) -> int:
"""simple docstring"""
try:
return ord(__A )
except TypeError:
raise ValueError(f'''invalid token: \'{token}\'''' )
def _snake_case ( self :str , __A :int ) -> str:
"""simple docstring"""
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(__A )
except TypeError:
raise ValueError(f'''invalid id: {index}''' )
def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Any:
"""simple docstring"""
return "".join(__A )
def _snake_case ( self :Optional[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def _snake_case ( self :List[Any] , __A :List[int] , __A :Optional[List[int]] = None , __A :bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = [1] + ([0] * len(__A )) + [1]
if token_ids_a is not None:
result += ([0] * len(__A )) + [1]
return result
def _snake_case ( self :List[str] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def _snake_case ( self :int , __A :str , __A :Optional[str] = None ) -> Any:
"""simple docstring"""
return ()
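# Hedged sketch of the core idea above: CANINE tokenizes straight to Unicode
# codepoints, so encoding is ord() per character plus the [CLS]/[SEP]
# pseudo-codepoints (0xE000 / 0xE001).
text = "héllo"
ids = [CLS] + [ord(c) for c in text] + [SEP]
print(ids)  # [57344, 104, 233, 108, 108, 111, 57345]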
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
def __init__( self :Tuple , __A :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = data
def __iter__( self :Union[str, Any] ) -> int:
"""simple docstring"""
for element in self.data:
yield element
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=True ):
SCREAMING_SNAKE_CASE__ = Accelerator(even_batches=UpperCamelCase__ )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Accelerator , UpperCamelCase__: int , UpperCamelCase__: int , UpperCamelCase__: bool = False ):
if iterable:
SCREAMING_SNAKE_CASE__ = DummyIterableDataset(torch.as_tensor(range(UpperCamelCase__ ) ) )
else:
SCREAMING_SNAKE_CASE__ = TensorDataset(torch.as_tensor(range(UpperCamelCase__ ) ) )
SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ )
return dl
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Accelerator , UpperCamelCase__: int , UpperCamelCase__: int , UpperCamelCase__: List[int] , UpperCamelCase__: List[int] , ):
SCREAMING_SNAKE_CASE__ = create_dataloader(accelerator=UpperCamelCase__ , dataset_size=UpperCamelCase__ , batch_size=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
UpperCamelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
UpperCamelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = create_accelerator(even_batches=UpperCamelCase__ )
verify_dataloader_batch_sizes(
UpperCamelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
UpperCamelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = create_accelerator(even_batches=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = create_dataloader(UpperCamelCase__ , dataset_size=3 , batch_size=1 )
SCREAMING_SNAKE_CASE__ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = ddp_model(batch[0].float() )
SCREAMING_SNAKE_CASE__ = output.sum()
loss.backward()
batch_idxs.append(UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ):
with warnings.catch_warnings(record=UpperCamelCase__ ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , UpperCamelCase__ )
assert "only supported for multi-GPU" in str(w[-1].message )
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = create_accelerator(even_batches=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = create_dataloader(UpperCamelCase__ , dataset_size=3 , batch_size=1 )
SCREAMING_SNAKE_CASE__ = create_dataloader(UpperCamelCase__ , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = train_dl.batch_sampler.even_batches
SCREAMING_SNAKE_CASE__ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = create_accelerator(even_batches=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ )
create_dataloader(UpperCamelCase__ , dataset_size=3 , batch_size=1 , iterable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = create_dataloader(UpperCamelCase__ , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("""ignore""" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = create_accelerator()
SCREAMING_SNAKE_CASE__ = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ )
create_dataloader(UpperCamelCase__ , dataset_size=3 , batch_size=1 , iterable=UpperCamelCase__ )
with warnings.catch_warnings(record=UpperCamelCase__ ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=UpperCamelCase__ ):
pass
assert issubclass(w[-1].category , UpperCamelCase__ )
assert "only supported for map-style datasets" in str(w[-1].message )
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = create_accelerator()
accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
test_default_ensures_even_batch_sizes()
accelerator.print("""Run tests with even_batches disabled""" )
test_can_disable_even_batches()
accelerator.print("""Test joining uneven inputs""" )
test_can_join_uneven_inputs()
accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
test_join_can_override_even_batches()
accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("""Test join with non DDP distributed raises warning""" )
SCREAMING_SNAKE_CASE__ = accelerator.state.distributed_type
SCREAMING_SNAKE_CASE__ = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = original_state
if __name__ == "__main__":
main()
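# Conceptual sketch of what even_batches changes (illustrative, not accelerate
# internals): with 2 processes, batch_size 1 and 3 samples, the sampler pads by
# wrapping around so both ranks see two batches; with even_batches=False, rank 1
# would get only one.
samples = [0, 1, 2]
world_size = 2
padded = samples + samples[: (-len(samples)) % world_size]  # [0, 1, 2, 0]
shards = [padded[r::world_size] for r in range(world_size)]
print(shards)  # [[0, 2], [1, 0]]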
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
_lowerCamelCase = 'bert-base-cased'
FP16 = 'fp16'
BF16 = 'bf16'
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class UpperCamelCase_ ( UpperCamelCase__ ):
def _snake_case ( self :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def _snake_case ( self :List[Any] ) -> Tuple:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = f'''{i + 1}'''
SCREAMING_SNAKE_CASE__ = strategy
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def _snake_case ( self :int ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = prefetch_policy
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _snake_case ( self :List[str] ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = state_dict_type
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(__A )
for policy in FSDP_AUTO_WRAP_POLICY:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = policy
if policy == "TRANSFORMER_BASED_WRAP":
SCREAMING_SNAKE_CASE__ = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
SCREAMING_SNAKE_CASE__ = """2000"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__A )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = """TRANSFORMER_BASED_WRAP"""
SCREAMING_SNAKE_CASE__ = """T5Layer"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
with self.assertRaises(__A ) as cm:
fsdp_plugin.set_auto_wrap_policy(__A )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = """SIZE_BASED_WRAP"""
SCREAMING_SNAKE_CASE__ = """0"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__A )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _snake_case ( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = mp_dtype
with mockenv_context(**__A ):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
def _snake_case ( self :str ) -> str:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = str(__A ).lower()
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) )
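# Minimal sketch of the mocked-environment pattern these tests rely on
# (mockenv_context behaves like mock.patch.dict over os.environ):
import os
from unittest import mock

with mock.patch.dict(os.environ, {"RANK": "0"}):
    assert os.environ["RANK"] == "0"
# outside the context manager, the original environment is restored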
@require_fsdp
@require_multi_gpu
@slow
class UpperCamelCase_ ( UpperCamelCase__ ):
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = 0.82
SCREAMING_SNAKE_CASE__ = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
SCREAMING_SNAKE_CASE__ = {
"""multi_gpu_fp16""": 3200,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
SCREAMING_SNAKE_CASE__ = 160
SCREAMING_SNAKE_CASE__ = 160
SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def _snake_case ( self :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_performance.py""" )
SCREAMING_SNAKE_CASE__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
SCREAMING_SNAKE_CASE__ = cmd.copy()
for i, strategy in enumerate(__A ):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
def _snake_case ( self :Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
SCREAMING_SNAKE_CASE__ = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
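            # Only FULL_SHARD is exercised end to end; every FSDP state-dict type is then
            # tested for both saving a checkpoint and resuming from it.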
SCREAMING_SNAKE_CASE__ = len(__A )
for state_dict_type in FSDP_STATE_DICT_TYPE:
SCREAMING_SNAKE_CASE__ = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
SCREAMING_SNAKE_CASE__ = cmd_config[:-1]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
def _snake_case ( self :Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
SCREAMING_SNAKE_CASE__ = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
SCREAMING_SNAKE_CASE__ = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(__A ):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() ) | 6 | 1 |
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size, max_position_embeddings=1_024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", )
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
return model
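# Usage sketch (the checkpoint and output paths below are hypothetical placeholders):
#
#   model = convert_fairseq_m2m100_checkpoint_from_disk("checkpoints/m2m100_model.pt")
#   model.save_pretrained("m2m100-converted")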
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path) | 6 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
_lowerCamelCase = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'PoolFormerConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'sail/poolformer_s12'
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'sail/poolformer_s12'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
_lowerCamelCase = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: float = 0.0 , UpperCamelCase__: bool = False ):
if drop_prob == 0.0 or not training:
return input
SCREAMING_SNAKE_CASE__ = 1 - drop_prob
SCREAMING_SNAKE_CASE__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
SCREAMING_SNAKE_CASE__ = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
SCREAMING_SNAKE_CASE__ = input.div(UpperCamelCase__ ) * random_tensor
return output
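# Thin nn.Module wrapper around the functional drop_path above, so stochastic depth can be
# registered as a submodule and automatically respect `self.training`.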
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Optional[Any] , __A :Optional[float] = None ) -> None:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = drop_prob
def _snake_case ( self :Any , __A :torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
return drop_path(__A , self.drop_prob , self.training )
def _snake_case ( self :Dict ) -> str:
"""simple docstring"""
return "p={}".format(self.drop_prob )
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Dict , __A :Optional[Any] , __A :Dict , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any]=None ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size)
SCREAMING_SNAKE_CASE__ = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride)
SCREAMING_SNAKE_CASE__ = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding)
        SCREAMING_SNAKE_CASE__ = nn.Conv2d(__A , __A , kernel_size=__A , stride=__A , padding=__A )
SCREAMING_SNAKE_CASE__ = norm_layer(__A ) if norm_layer else nn.Identity()
def _snake_case ( self :Dict , __A :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.projection(__A )
SCREAMING_SNAKE_CASE__ = self.norm(__A )
return embeddings
class UpperCamelCase_ ( nn.GroupNorm ):
def __init__( self :Dict , __A :Tuple , **__A :Union[str, Any] ) -> Dict:
"""simple docstring"""
super().__init__(1 , __A , **__A )
class UpperCamelCase_ ( nn.Module ):
def __init__( self :List[str] , __A :Optional[int] ) -> Any:
"""simple docstring"""
super().__init__()
        SCREAMING_SNAKE_CASE__ = nn.AvgPool2d(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A )
def _snake_case ( self :Any , __A :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return self.pool(__A ) - hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Optional[Any] , __A :Tuple , __A :Dict , __A :int , __A :Any ) -> str:
"""simple docstring"""
super().__init__()
        SCREAMING_SNAKE_CASE__ = nn.Conv2d(__A , __A , 1 )
        SCREAMING_SNAKE_CASE__ = nn.Conv2d(__A , __A , 1 )
SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A )
if isinstance(config.hidden_act , __A ):
            SCREAMING_SNAKE_CASE__ = ACT2FN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE__ = config.hidden_act
def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.conva(__A )
SCREAMING_SNAKE_CASE__ = self.act_fn(__A )
SCREAMING_SNAKE_CASE__ = self.drop(__A )
SCREAMING_SNAKE_CASE__ = self.conva(__A )
SCREAMING_SNAKE_CASE__ = self.drop(__A )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Any , __A :str , __A :List[str] , __A :Tuple , __A :Dict , __A :Union[str, Any] , __A :int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = PoolFormerPooling(__A )
SCREAMING_SNAKE_CASE__ = PoolFormerOutput(__A , __A , __A , __A )
SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A )
SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A )
# Useful for training neural nets
SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity()
SCREAMING_SNAKE_CASE__ = config.use_layer_scale
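        # LayerScale-style trainable per-channel scales (initialized to layer_scale_init_value),
        # applied to each sub-block output before the residual addition.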
if config.use_layer_scale:
SCREAMING_SNAKE_CASE__ = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
def _snake_case ( self :Optional[Any] , __A :Optional[int] ) -> str:
"""simple docstring"""
if self.use_layer_scale:
SCREAMING_SNAKE_CASE__ = self.pooling(self.before_norm(__A ) )
SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A )
SCREAMING_SNAKE_CASE__ = ()
SCREAMING_SNAKE_CASE__ = self.output(self.after_norm(__A ) )
SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A )
SCREAMING_SNAKE_CASE__ = (output,) + outputs
return outputs
else:
SCREAMING_SNAKE_CASE__ = self.drop_path(self.pooling(self.before_norm(__A ) ) )
# First residual connection
SCREAMING_SNAKE_CASE__ = pooling_output + hidden_states
SCREAMING_SNAKE_CASE__ = ()
# Second residual connection inside the PoolFormerOutput block
SCREAMING_SNAKE_CASE__ = self.drop_path(self.output(self.after_norm(__A ) ) )
SCREAMING_SNAKE_CASE__ = hidden_states + layer_output
SCREAMING_SNAKE_CASE__ = (output,) + outputs
return outputs
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Union[str, Any] , __A :List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = config
# stochastic depth decay rule
SCREAMING_SNAKE_CASE__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
SCREAMING_SNAKE_CASE__ = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A )
# Transformer blocks
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
SCREAMING_SNAKE_CASE__ = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__A ) )
SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A )
def _snake_case ( self :str , __A :Tuple , __A :Dict=False , __A :Tuple=True ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = () if output_hidden_states else None
SCREAMING_SNAKE_CASE__ = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = layers
# Get patch embeddings from hidden_states
SCREAMING_SNAKE_CASE__ = embedding_layer(__A )
# Send the embeddings through the blocks
for _, blk in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = blk(__A )
SCREAMING_SNAKE_CASE__ = layer_outputs[0]
if output_hidden_states:
SCREAMING_SNAKE_CASE__ = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A )
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = PoolFormerConfig
lowerCamelCase_ = "poolformer"
lowerCamelCase_ = "pixel_values"
lowerCamelCase_ = True
def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict:
"""simple docstring"""
        if isinstance(__A , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any:
"""simple docstring"""
if isinstance(__A , __A ):
SCREAMING_SNAKE_CASE__ = value
_lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :Union[str, Any] , __A :Any ) -> int:
"""simple docstring"""
super().__init__(__A )
SCREAMING_SNAKE_CASE__ = config
SCREAMING_SNAKE_CASE__ = PoolFormerEncoder(__A )
# Initialize weights and apply final processing
self.post_init()
def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
SCREAMING_SNAKE_CASE__ = self.encoder(
__A , output_hidden_states=__A , return_dict=__A , )
SCREAMING_SNAKE_CASE__ = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , )
class UpperCamelCase_ ( nn.Module ):
def __init__( self :int , __A :Optional[int] ) -> Tuple:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size )
def _snake_case ( self :List[Any] , __A :Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.dense(__A )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , )
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :str , __A :Union[str, Any] ) -> int:
"""simple docstring"""
super().__init__(__A )
SCREAMING_SNAKE_CASE__ = config.num_labels
SCREAMING_SNAKE_CASE__ = PoolFormerModel(__A )
# Final norm
SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
SCREAMING_SNAKE_CASE__ = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _snake_case ( self :int , __A :Optional[torch.FloatTensor] = None , __A :Optional[torch.LongTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ = self.poolformer(
__A , output_hidden_states=__A , return_dict=__A , )
SCREAMING_SNAKE_CASE__ = outputs[0]
SCREAMING_SNAKE_CASE__ = self.classifier(self.norm(__A ).mean([-2, -1] ) )
SCREAMING_SNAKE_CASE__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE__ = """single_label_classification"""
else:
SCREAMING_SNAKE_CASE__ = """multi_label_classification"""
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE__ = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE__ = CrossEntropyLoss()
SCREAMING_SNAKE_CASE__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE__ = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A )
if not return_dict:
SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states ) | 6 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
_lowerCamelCase = 'bert-base-cased'
FPaa = 'fp16'
BFaa = 'bf16'
dtypes = [FPaa, BFaa]
@require_fsdp
@require_cuda
class UpperCamelCase_ ( UpperCamelCase__ ):
def _snake_case ( self :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def _snake_case ( self :List[Any] ) -> Tuple:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = f'''{i + 1}'''
SCREAMING_SNAKE_CASE__ = strategy
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def _snake_case ( self :int ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = prefetch_policy
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _snake_case ( self :List[str] ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
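        # FSDP_STATE_DICT_TYPE entries map by position onto torch's 1-indexed StateDictType enum;
        # FULL_STATE_DICT should additionally default to CPU offload with rank0-only gathering.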
for i, state_dict_type in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = state_dict_type
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(__A )
for policy in FSDP_AUTO_WRAP_POLICY:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = policy
if policy == "TRANSFORMER_BASED_WRAP":
SCREAMING_SNAKE_CASE__ = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
SCREAMING_SNAKE_CASE__ = """2000"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__A )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = """TRANSFORMER_BASED_WRAP"""
SCREAMING_SNAKE_CASE__ = """T5Layer"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
with self.assertRaises(__A ) as cm:
fsdp_plugin.set_auto_wrap_policy(__A )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = """SIZE_BASED_WRAP"""
SCREAMING_SNAKE_CASE__ = """0"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__A )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _snake_case ( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = mp_dtype
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = Accelerator()
if mp_dtype == "fp16":
                    SCREAMING_SNAKE_CASE__ = torch.float16
                elif mp_dtype == "bf16":
                    SCREAMING_SNAKE_CASE__ = torch.bfloat16
SCREAMING_SNAKE_CASE__ = MixedPrecision(param_dtype=__A , reduce_dtype=__A , buffer_dtype=__A )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __A )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , __A ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__A )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = str(__A ).lower()
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) )
@require_fsdp
@require_multi_gpu
@slow
class UpperCamelCase_ ( UpperCamelCase__ ):
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = 0.8_2
SCREAMING_SNAKE_CASE__ = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
SCREAMING_SNAKE_CASE__ = {
"""multi_gpu_fp16""": 3200,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
SCREAMING_SNAKE_CASE__ = 160
SCREAMING_SNAKE_CASE__ = 160
SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def _snake_case ( self :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_performance.py""" )
SCREAMING_SNAKE_CASE__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
SCREAMING_SNAKE_CASE__ = cmd.copy()
for i, strategy in enumerate(__A ):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
def _snake_case ( self :Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
SCREAMING_SNAKE_CASE__ = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
SCREAMING_SNAKE_CASE__ = len(__A )
for state_dict_type in FSDP_STATE_DICT_TYPE:
SCREAMING_SNAKE_CASE__ = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
SCREAMING_SNAKE_CASE__ = cmd_config[:-1]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
def _snake_case ( self :Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
SCREAMING_SNAKE_CASE__ = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
SCREAMING_SNAKE_CASE__ = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(__A ):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() ) | 6 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_lengths
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = gelu_activation
SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings
SCREAMING_SNAKE_CASE__ = causal
SCREAMING_SNAKE_CASE__ = asm
SCREAMING_SNAKE_CASE__ = n_langs
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = n_special
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = summary_type
SCREAMING_SNAKE_CASE__ = use_proj
SCREAMING_SNAKE_CASE__ = scope
def _snake_case ( self :Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float()
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self :List[str] ) -> Optional[int]:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _snake_case ( self :Tuple , __A :str , __A :int , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[int] , __A :Union[str, Any] , __A :Union[str, Any] , __A :str , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertModel(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , lengths=__A , langs=__A )
SCREAMING_SNAKE_CASE__ = model(__A , langs=__A )
SCREAMING_SNAKE_CASE__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self :str , __A :Any , __A :str , __A :Union[str, Any] , __A :Optional[Any] , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[Any] , __A :Union[str, Any] , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertWithLMHeadModel(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self :Tuple , __A :Union[str, Any] , __A :Optional[Any] , __A :Dict , __A :Dict , __A :Union[str, Any] , __A :List[str] , __A :Optional[int] , __A :int , __A :str , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnsweringSimple(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self :List[str] , __A :Any , __A :int , __A :Tuple , __A :Optional[Any] , __A :Tuple , __A :Optional[int] , __A :str , __A :int , __A :str , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnswering(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(
__A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , p_mask=__A , )
SCREAMING_SNAKE_CASE__ = model(
__A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , )
((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A )
((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self :Optional[int] , __A :str , __A :Optional[int] , __A :Tuple , __A :Dict , __A :List[str] , __A :Tuple , __A :List[str] , __A :Dict , __A :List[str] , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertForSequenceClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self :Optional[Any] , __A :Optional[Any] , __A :Optional[Any] , __A :List[str] , __A :Optional[Any] , __A :int , __A :Tuple , __A :Optional[int] , __A :Union[str, Any] , __A :Dict , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = FlaubertForTokenClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self :str , __A :Any , __A :Tuple , __A :List[str] , __A :Tuple , __A :Any , __A :int , __A :Dict , __A :List[str] , __A :Tuple , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_choices
SCREAMING_SNAKE_CASE__ = FlaubertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self :Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self :Any , __A :Optional[int] , __A :Optional[int] , __A :Dict , __A :List[Any] , __A :Tuple ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
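    # FlaubertForQuestionAnswering needs extra label inputs (start/end positions), so
    # zero-filled dummy tensors are created below when return_labels is True.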
def _snake_case ( self :Tuple , __A :List[str] , __A :Optional[int] , __A :Dict=False ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
SCREAMING_SNAKE_CASE__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
SCREAMING_SNAKE_CASE__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__A , emb_dim=37 )
def _snake_case ( self :int ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self :Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__A )
def _snake_case ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__A )
def _snake_case ( self :str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__A )
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__A )
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__A )
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__A )
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__A )
@slow
def _snake_case ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def _snake_case ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(config=__A )
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A )
SCREAMING_SNAKE_CASE__ = torch.jit.trace(
__A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__A , os.path.join(__A , """traced_model.pt""" ) )
SCREAMING_SNAKE_CASE__ = torch.jit.load(os.path.join(__A , """traced_model.pt""" ) , map_location=__A )
loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _snake_case ( self :Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(__A )[0]
SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __A )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) ) | 6 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = LongformerTokenizer
lowerCamelCase_ = True
lowerCamelCase_ = LongformerTokenizerFast
lowerCamelCase_ = True
def _snake_case ( self :int ) -> Tuple:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE__ = dict(zip(__A , range(len(__A ) ) ) )
SCREAMING_SNAKE_CASE__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
SCREAMING_SNAKE_CASE__ = {"""unk_token""": """<unk>"""}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__A ) )
def _snake_case ( self :str , **__A :Tuple ) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self :Union[str, Any] , **__A :str ) -> List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self :List[Any] , __A :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """lower newer"""
SCREAMING_SNAKE_CASE__ = """lower newer"""
return input_text, output_text
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ = """lower newer"""
SCREAMING_SNAKE_CASE__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__A ) # , add_prefix_space=True)
self.assertListEqual(__A , __A )
SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def _snake_case ( self :int ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__A ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__A ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def _snake_case ( self :Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
SCREAMING_SNAKE_CASE__ = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__A , add_prefix_space=__A )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__A , add_prefix_space=__A )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(__A )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _snake_case ( self :List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = """Encode this sequence."""
SCREAMING_SNAKE_CASE__ = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE__ = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__A , __A )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__A , __A )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(__A , add_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__A , __A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE__ = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__A , lstrip=__A , rstrip=__A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(__A )
SCREAMING_SNAKE_CASE__ = """Encode <mask> sequence"""
SCREAMING_SNAKE_CASE__ = """Encode <mask>sequence"""
SCREAMING_SNAKE_CASE__ = tokenizer.encode(__A )
SCREAMING_SNAKE_CASE__ = encoded.index(__A )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__A , __A )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(__A )
SCREAMING_SNAKE_CASE__ = encoded.index(__A )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__A , __A )
def _snake_case ( self :Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
def _snake_case ( self :List[str] ) -> Optional[int]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(__A , **__A )
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained(__A , **__A )
SCREAMING_SNAKE_CASE__ = """A, <mask> AllenNLP sentence."""
SCREAMING_SNAKE_CASE__ = tokenizer_r.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
SCREAMING_SNAKE_CASE__ = tokenizer_p.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
SCREAMING_SNAKE_CASE__ = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
SCREAMING_SNAKE_CASE__ = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def _snake_case ( self :Optional[int] ) -> List[str]:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__A , add_prefix_space=__A , trim_offsets=__A )
SCREAMING_SNAKE_CASE__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __A )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __A )
self.assertEqual(post_processor_state["""trim_offsets"""] , __A )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
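# --- Added illustrative probe (not part of the original test file). It assumes
# the `roberta-base` checkpoint is reachable; the printed offsets mirror the
# expectations asserted above. ---
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=False, trim_offsets=True)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.offset_mapping)  # [(0, 5), (6, 11)] -> the inter-word space is trimmed away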
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
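# --- Added usage sketch (not part of the original script): the `accumulate`
# API validated by the tests above, on a tiny made-up model and dataset. ---
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

def demo_accumulate():
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = torch.nn.Linear(1, 1)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    dataset = TensorDataset(torch.randn(8, 1), torch.randn(8, 1))
    model, opt, loader = accelerator.prepare(model, opt, DataLoader(dataset, batch_size=2))
    for x, y in loader:
        # Gradients are synchronized and applied only on every 2nd step inside `accumulate`
        with accelerator.accumulate(model):
            loss = torch.nn.functional.mse_loss(model(x), y)
            accelerator.backward(loss)
            opt.step()
            opt.zero_grad()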
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _snake_case ( self :List[Any] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _snake_case ( self :Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def _snake_case ( self :Dict ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self :Any ) -> int:
"""simple docstring"""
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
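# --- Added illustration: where the tester's `seq_length` formula above comes
# from. With common ViTMAE settings (image_size=224, patch_size=16,
# mask_ratio=0.75), only ~25% of the (num_patches + 1) tokens stay visible. ---
import math

def vitmae_seq_length(image_size: int, patch_size: int, mask_ratio: float) -> int:
    num_patches = (image_size // patch_size) ** 2
    return int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

print(vitmae_seq_length(224, 16, 0.75))  # -> 50 tokens kept out of 197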
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase_ ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
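# --- Added smoke test for the helpers above ("echo" assumes a POSIX-like
# environment; purely illustrative). ---
if __name__ == "__main__":
    print(run_command(["echo", "hello from a subprocess"], return_stdout=True))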
def find_min(arr):
    # Partition `arr` into two subsets so that the difference of their sums is minimal.
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # subset-sum recurrence: either skip arr[i - 1] or include it
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
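# --- Added example: the DP above answers "split a multiset into two parts with
# the closest sums". For [1, 6, 11, 5] the best split is {1, 5, 6} vs {11}. ---
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # -> 1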
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
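# --- Added usage sketch: the API under test resolves which framework an ONNX
# export should trace; requires `transformers` with torch and/or TF installed. ---
if __name__ == "__main__":
    # Resolves to "pt" when PyTorch is installed, "tf" when only TensorFlow is.
    print(FeaturesManager.determine_framework(SMALL_MODEL_IDENTIFIER))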
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
    doctest.testmod(verbose=True)
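# --- Added worked example: a 10 kg body at 10 m/s carries
# 0.5 * 10 * 10**2 = 500 J. ---
print(kinetic_energy(10, 10))  # 500.0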
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
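# --- Added demo of the environment probing used by
# is_sagemaker_model_parallel_available above (values are illustrative). ---
def _demo_smp_env():
    import json
    import os

    os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
    smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
    return "partitions" in smp_options  # True -> model parallelism requested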
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
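# --- Added usage sketch: composing an encoder-decoder config from two BERT
# configs (requires `transformers`). ---
from transformers import BertConfig, EncoderDecoderConfig

config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True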
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]",
                 eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
                 mask_token="[MASK]", **kwargs):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
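# --- Added illustration of create_token_type_ids_from_sequences above: 0s cover
# [CLS] A ... [SEP], 1s cover B ... [SEP]. ---
from typing import List, Optional

def demo_token_type_ids(len_a: int, len_b: Optional[int] = None) -> List[int]:
    if len_b is None:
        return [0] * (len_a + 2)
    return [0] * (len_a + 2) + [1] * (len_b + 1)

print(demo_token_type_ids(3, 2))  # [0, 0, 0, 0, 0, 1, 1, 1]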
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
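# --- Added usage sketch (dataset name illustrative; `prepare_for_task` existed
# on older `datasets` releases, so treat this as an assumption). ---
# from datasets import load_dataset
# ds = load_dataset("imdb", split="train")
# ds = ds.prepare_for_task("text-classification")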
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "token-classification"
def __init__( self :Dict , __A :int ) -> Union[str, Any]:
"""simple docstring"""
if type(__A ) == dict:
SCREAMING_SNAKE_CASE__ = Namespace(**__A )
SCREAMING_SNAKE_CASE__ = import_module("""tasks""" )
try:
SCREAMING_SNAKE_CASE__ = getattr(__A , hparams.task_type )
SCREAMING_SNAKE_CASE__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
SCREAMING_SNAKE_CASE__ = self.token_classification_task.get_labels(hparams.labels )
SCREAMING_SNAKE_CASE__ = CrossEntropyLoss().ignore_index
super().__init__(__A , len(self.labels ) , self.mode )
def _snake_case ( self :Tuple , **__A :Optional[Any] ) -> List[str]:
"""simple docstring"""
return self.model(**__A )
def _snake_case ( self :int , __A :List[Any] , __A :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
SCREAMING_SNAKE_CASE__ = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
SCREAMING_SNAKE_CASE__ = self(**__A )
SCREAMING_SNAKE_CASE__ = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _snake_case ( self :List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.hparams
for mode in ["train", "dev", "test"]:
SCREAMING_SNAKE_CASE__ = self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __A )
SCREAMING_SNAKE_CASE__ = torch.load(__A )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
SCREAMING_SNAKE_CASE__ = self.token_classification_task.read_examples_from_file(args.data_dir , __A )
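# XLNet-style models put the CLS token at the end and pad on the left, hence the model-type switches below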
SCREAMING_SNAKE_CASE__ = self.token_classification_task.convert_examples_to_features(
__A , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__A , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , __A )
torch.save(__A , __A )
def _snake_case ( self :Any , __A :int , __A :int , __A :bool = False ) -> DataLoader:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._feature_file(__A )
logger.info("""Loading features from cached file %s""" , __A )
SCREAMING_SNAKE_CASE__ = torch.load(__A )
SCREAMING_SNAKE_CASE__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
SCREAMING_SNAKE_CASE__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
SCREAMING_SNAKE_CASE__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
SCREAMING_SNAKE_CASE__ = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
SCREAMING_SNAKE_CASE__ = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A )
def _snake_case ( self :List[Any] , __A :Optional[Any] , __A :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
"""Compute validation""" ""
SCREAMING_SNAKE_CASE__ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
SCREAMING_SNAKE_CASE__ = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don't use token_type_ids
SCREAMING_SNAKE_CASE__ = self(**__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs[:2]
SCREAMING_SNAKE_CASE__ = logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE__ = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case ( self :Optional[int] , __A :List[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
SCREAMING_SNAKE_CASE__ = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
SCREAMING_SNAKE_CASE__ = np.argmax(__A , axis=2 )
SCREAMING_SNAKE_CASE__ = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
SCREAMING_SNAKE_CASE__ = dict(enumerate(self.labels ) )
SCREAMING_SNAKE_CASE__ = [[] for _ in range(out_label_ids.shape[0] )]
SCREAMING_SNAKE_CASE__ = [[] for _ in range(out_label_ids.shape[0] )]
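# rebuild per-sequence label and prediction lists, dropping positions that carry the pad label id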
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
SCREAMING_SNAKE_CASE__ = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(__A , __A ),
"""precision""": precision_score(__A , __A ),
"""recall""": recall_score(__A , __A ),
"""f1""": fa_score(__A , __A ),
}
SCREAMING_SNAKE_CASE__ = dict(results.items() )
SCREAMING_SNAKE_CASE__ = results
return ret, preds_list, out_label_list
def _snake_case ( self :str , __A :List[str] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._eval_end(__A )
SCREAMING_SNAKE_CASE__ = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case ( self :Any , __A :List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._eval_end(__A )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
SCREAMING_SNAKE_CASE__ = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _snake_case ( __A :Optional[int] , __A :int ) -> Tuple:
"""simple docstring"""
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=__A , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__A , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=__A , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__A , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
_lowerCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
_lowerCamelCase = parser.parse_args()
_lowerCamelCase = NERTransformer(args)
_lowerCamelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
_lowerCamelCase = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
_lowerCamelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model) | 6 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = model.config
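# split the original Donut config into a Swin encoder config and an MBart decoder config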
SCREAMING_SNAKE_CASE__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
SCREAMING_SNAKE_CASE__ = MBartConfig(
is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , add_cross_attention=UpperCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=UpperCamelCase__ , add_final_layer_norm=UpperCamelCase__ , )
return encoder_config, decoder_config
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
if "encoder.model" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
SCREAMING_SNAKE_CASE__ = """encoder.""" + name
if "attn.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
SCREAMING_SNAKE_CASE__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
SCREAMING_SNAKE_CASE__ = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
SCREAMING_SNAKE_CASE__ = """encoder.layernorm.bias"""
return name
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
SCREAMING_SNAKE_CASE__ = key.split(""".""" )
SCREAMING_SNAKE_CASE__ = int(key_split[3] )
SCREAMING_SNAKE_CASE__ = int(key_split[5] )
SCREAMING_SNAKE_CASE__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
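# the original checkpoint stores q, k and v fused in one qkv tensor; split it into equal thirds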
if "weight" in key:
SCREAMING_SNAKE_CASE__ = val[:dim, :]
SCREAMING_SNAKE_CASE__ = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE__ = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE__ = val[:dim]
SCREAMING_SNAKE_CASE__ = val[dim : dim * 2]
SCREAMING_SNAKE_CASE__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
SCREAMING_SNAKE_CASE__ = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int=None , UpperCamelCase__: str=False ):
# load original model
SCREAMING_SNAKE_CASE__ = DonutModel.from_pretrained(UpperCamelCase__ ).eval()
# load HuggingFace model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_configs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = DonutSwinModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = MBartForCausalLM(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = original_model.state_dict()
SCREAMING_SNAKE_CASE__ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# verify results on scanned document
SCREAMING_SNAKE_CASE__ = load_dataset("""hf-internal-testing/example-documents""" )
SCREAMING_SNAKE_CASE__ = dataset["""test"""][0]["""image"""].convert("""RGB""" )
SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase__ , from_slow=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
SCREAMING_SNAKE_CASE__ = DonutProcessor(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
SCREAMING_SNAKE_CASE__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
SCREAMING_SNAKE_CASE__ = """When is the coffee break?"""
SCREAMING_SNAKE_CASE__ = task_prompt.replace("""{user_input}""" , UpperCamelCase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
SCREAMING_SNAKE_CASE__ = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
SCREAMING_SNAKE_CASE__ = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
SCREAMING_SNAKE_CASE__ = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
SCREAMING_SNAKE_CASE__ = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
SCREAMING_SNAKE_CASE__ = """hello world"""
else:
raise ValueError("""Model name not supported""" )
SCREAMING_SNAKE_CASE__ = original_model.decoder.tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="""pt""" )[
"""input_ids"""
]
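# verify patch embeddings against the original model's patch_embed output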
SCREAMING_SNAKE_CASE__ = original_model.encoder.model.patch_embed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.encoder.embeddings(UpperCamelCase__ )
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
# verify encoder hidden states
SCREAMING_SNAKE_CASE__ = original_model.encoder(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = model.encoder(UpperCamelCase__ ).last_hidden_state
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
# verify decoder hidden states
SCREAMING_SNAKE_CASE__ = original_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).logits
SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
_lowerCamelCase = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 6 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
_lowerCamelCase = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict ):
for attribute in key.split(""".""" ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
SCREAMING_SNAKE_CASE__ = """lm_head"""
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
SCREAMING_SNAKE_CASE__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
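# copy the checkpoint tensor into the matching parameter slot (weight / weight_g / weight_v / bias)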
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ = value
else:
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: int , UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == """group""" , )
SCREAMING_SNAKE_CASE__ = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE__ = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
SCREAMING_SNAKE_CASE__ = True
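# "*" in the mapped key is a placeholder for the encoder layer index, recovered from the fairseq name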
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ = name.split(UpperCamelCase__ )[0].split(""".""" )[-2]
SCREAMING_SNAKE_CASE__ = mapped_key.replace("""*""" , UpperCamelCase__ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ = """weight_g"""
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ = """weight_v"""
elif "bias" in name:
SCREAMING_SNAKE_CASE__ = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE__ = """weight"""
else:
SCREAMING_SNAKE_CASE__ = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] ):
SCREAMING_SNAKE_CASE__ = full_name.split("""conv_layers.""" )[-1]
SCREAMING_SNAKE_CASE__ = name.split(""".""" )
SCREAMING_SNAKE_CASE__ = int(items[0] )
SCREAMING_SNAKE_CASE__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[Any]=None , UpperCamelCase__: Any=None , UpperCamelCase__: Optional[int]=True ):
if config_path is not None:
SCREAMING_SNAKE_CASE__ = UniSpeechConfig.from_pretrained(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ = UniSpeechConfig()
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE__ = Dictionary.load_from_json(UpperCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE__ = target_dict.pad_index
SCREAMING_SNAKE_CASE__ = target_dict.bos_index
SCREAMING_SNAKE_CASE__ = target_dict.eos_index
SCREAMING_SNAKE_CASE__ = len(target_dict.symbols )
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , """vocab.json""" )
if not os.path.isdir(UpperCamelCase__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(UpperCamelCase__ ) )
return
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 43
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = WavaVecaPhonemeCTCTokenizer(
UpperCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE__ = True if config.feat_extract_norm == """layer""" else False
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = UniSpeechForCTC(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ = UniSpeechForPreTraining(UpperCamelCase__ )
if is_finetuned:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
SCREAMING_SNAKE_CASE__ = model[0].eval()
recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
hf_unispeech.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
_lowerCamelCase = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 6 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_euler""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self :str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_euler""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , generator=__A , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=__A , )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 6 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_lengths
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = gelu_activation
SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings
SCREAMING_SNAKE_CASE__ = causal
SCREAMING_SNAKE_CASE__ = asm
SCREAMING_SNAKE_CASE__ = n_langs
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = n_special
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = summary_type
SCREAMING_SNAKE_CASE__ = use_proj
SCREAMING_SNAKE_CASE__ = scope
def _snake_case ( self :Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float()
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self :List[str] ) -> Optional[int]:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _snake_case ( self :Tuple , __A :str , __A :int , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[int] , __A :Union[str, Any] , __A :Union[str, Any] , __A :str , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertModel(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , lengths=__A , langs=__A )
SCREAMING_SNAKE_CASE__ = model(__A , langs=__A )
SCREAMING_SNAKE_CASE__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self :str , __A :Any , __A :str , __A :Union[str, Any] , __A :Optional[Any] , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[Any] , __A :Union[str, Any] , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertWithLMHeadModel(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self :Tuple , __A :Union[str, Any] , __A :Optional[Any] , __A :Dict , __A :Dict , __A :Union[str, Any] , __A :List[str] , __A :Optional[int] , __A :int , __A :str , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnsweringSimple(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self :List[str] , __A :Any , __A :int , __A :Tuple , __A :Optional[Any] , __A :Tuple , __A :Optional[int] , __A :str , __A :int , __A :str , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnswering(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(
__A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , p_mask=__A , )
SCREAMING_SNAKE_CASE__ = model(
__A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , )
((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A )
((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self :Optional[int] , __A :str , __A :Optional[int] , __A :Tuple , __A :Dict , __A :List[str] , __A :Tuple , __A :List[str] , __A :Dict , __A :List[str] , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertForSequenceClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self :Optional[Any] , __A :Optional[Any] , __A :Optional[Any] , __A :List[str] , __A :Optional[Any] , __A :int , __A :Tuple , __A :Optional[int] , __A :Union[str, Any] , __A :Dict , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = FlaubertForTokenClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self :str , __A :Any , __A :Tuple , __A :List[str] , __A :Tuple , __A :Any , __A :int , __A :Dict , __A :List[str] , __A :Tuple , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_choices
SCREAMING_SNAKE_CASE__ = FlaubertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self :Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self :Any , __A :Optional[int] , __A :Optional[int] , __A :Dict , __A :List[Any] , __A :Tuple ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self :Tuple , __A :List[str] , __A :Optional[int] , __A :Dict=False ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
SCREAMING_SNAKE_CASE__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
SCREAMING_SNAKE_CASE__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__A , emb_dim=37 )
def _snake_case ( self :int ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self :Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__A )
def _snake_case ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__A )
def _snake_case ( self :str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__A )
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__A )
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__A )
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__A )
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__A )
@slow
def _snake_case ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def _snake_case ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(config=__A )
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A )
SCREAMING_SNAKE_CASE__ = torch.jit.trace(
__A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__A , os.path.join(__A , """traced_model.pt""" ) )
SCREAMING_SNAKE_CASE__ = torch.jit.load(os.path.join(__A , """traced_model.pt""" ) , map_location=__A )
loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _snake_case ( self :Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(__A )[0]
SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __A )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) ) | 6 |
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int = 600_851_475_143 ):
try:
SCREAMING_SNAKE_CASE__ = int(UpperCamelCase__ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
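# trial division: strip each factor i completely; the last factor stripped (or the leftover n) is the largest prime factor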
while i * i <= n:
while n % i == 0:
SCREAMING_SNAKE_CASE__ = i
n //= i
i += 1
if n > 1:
SCREAMING_SNAKE_CASE__ = n
return int(UpperCamelCase__ )
if __name__ == "__main__":
print(F'''{solution() = }''') | 6 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] ):
SCREAMING_SNAKE_CASE__ = UniSpeechSatForSequenceClassification.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = downstream_dict["""projector.weight"""]
SCREAMING_SNAKE_CASE__ = downstream_dict["""projector.bias"""]
SCREAMING_SNAKE_CASE__ = downstream_dict["""model.post_net.linear.weight"""]
SCREAMING_SNAKE_CASE__ = downstream_dict["""model.post_net.linear.bias"""]
return model
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: int , UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = UniSpeechSatForAudioFrameClassification.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = downstream_dict["""model.linear.weight"""]
SCREAMING_SNAKE_CASE__ = downstream_dict["""model.linear.bias"""]
return model
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Dict ):
SCREAMING_SNAKE_CASE__ = UniSpeechSatForXVector.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = downstream_dict["""connector.weight"""]
SCREAMING_SNAKE_CASE__ = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
SCREAMING_SNAKE_CASE__ = downstream_dict[
f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
SCREAMING_SNAKE_CASE__ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
SCREAMING_SNAKE_CASE__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
SCREAMING_SNAKE_CASE__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
SCREAMING_SNAKE_CASE__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
SCREAMING_SNAKE_CASE__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
SCREAMING_SNAKE_CASE__ = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Any , UpperCamelCase__: Dict , UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = torch.load(UpperCamelCase__ , map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ = checkpoint["""Downstream"""]
SCREAMING_SNAKE_CASE__ = UniSpeechSatConfig.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(
UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , do_normalize=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = hf_config.architectures[0]
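# dispatch on the architecture name suffix to pick the matching head-conversion routine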
if arch.endswith("""ForSequenceClassification""" ):
SCREAMING_SNAKE_CASE__ = convert_classification(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
elif arch.endswith("""ForAudioFrameClassification""" ):
SCREAMING_SNAKE_CASE__ = convert_diarization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
elif arch.endswith("""ForXVector""" ):
SCREAMING_SNAKE_CASE__ = convert_xvector(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
SCREAMING_SNAKE_CASE__ = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
_lowerCamelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path) | 6 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__A ) )
def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(__A ) )
def _snake_case ( self :Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertFalse(is_safetensors_compatible(__A , variant=__A ) ) | 6 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
SCREAMING_SNAKE_CASE__ : Optional[Any] = pytest.mark.integration
SCREAMING_SNAKE_CASE__ : Dict = {"""comet"""}
SCREAMING_SNAKE_CASE__ : Dict = importlib.util.find_spec("""fairseq""") is not None
SCREAMING_SNAKE_CASE__ : Tuple = {"""code_eval"""}
SCREAMING_SNAKE_CASE__ : List[Any] = os.name == """nt"""
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""bertscore""", """frugalscore""", """perplexity"""}
SCREAMING_SNAKE_CASE__ : Dict = importlib.util.find_spec("""transformers""") is not None
def __lowercase ( snake_case ):
"""simple docstring"""
@wraps(snake_case )
def wrapper(self, snake_case ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('''"test requires Fairseq"''' )
else:
test_case(self, snake_case )
return wrapper
def __lowercase ( snake_case ):
"""simple docstring"""
@wraps(snake_case )
def wrapper(self, snake_case ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('''"test requires transformers"''' )
else:
test_case(self, snake_case )
return wrapper
def __lowercase ( snake_case ):
"""simple docstring"""
@wraps(snake_case )
def wrapper(self, snake_case ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('''"test not supported on Windows"''' )
else:
test_case(self, snake_case )
return wrapper
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Optional[int] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
lowerCamelCase , lowerCamelCase , lowerCamelCase )
@local
class lowerCamelCase_ ( parameterized.TestCase ):
a__ = {}
a__ = None
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Tuple = '''[...]'''
__magic_name__ :int = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , __lowerCAmelCase ) ).module_path )
__magic_name__ :Any = datasets.load.import_main_class(metric_module.__name__ , dataset=__lowerCAmelCase )
# check parameters
__magic_name__ :Tuple = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__lowerCAmelCase , metric_module.__name__ ):
with self.use_local_metrics():
try:
__magic_name__ :Any = doctest.testmod(__lowerCAmelCase , verbose=__lowerCAmelCase , raise_on_error=__lowerCAmelCase )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
    @slow
    def test_load_real_metric(self, metric_name):
        doctest_case = "[...]"  # elided in the source; left as-is
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock `get_model`, which is supposed to download a bert model
    # mock `bert_cos_score_idf`, which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock `download_model` and `load_from_checkpoint`, which are supposed to download a comet model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 0 |
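A quick companion check, added here as a sketch (it is not part of the original test file): the same local `metrics/` layout can be exercised directly with one of the valid schemes named in the error message above, assuming the `seqeval` package is installed.

import os

from datasets import load_metric

metric = load_metric(os.path.join("metrics", "seqeval"))
results = metric.compute(
    predictions=[["B-PER", "I-PER", "O"]],
    references=[["B-PER", "I-PER", "O"]],
    scheme="IOB2",
    mode="strict",
)
assert results["overall_f1"] == 1.0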
import argparse
import datetime
def zeller(date_input: str) -> str:
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input) | 6 | 0 |
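Added sanity check (not in the original script): 2000-01-01 fell on a Saturday, and zeller() already cross-validates its own arithmetic against `datetime`, so a single spot check is enough.

assert zeller("01-01-2000") == "Your date 01-01-2000, is a Saturday!"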
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve an Instagram video/IGTV page URL and return the raw video bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 1 |
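An alternative sketch, added for context (the helper name is illustrative and not part of the original script): for large videos it can be preferable to stream the response to disk instead of holding the whole payload in memory.

import requests


def download_video_streaming(video_url: str, file_name: str) -> None:
    # Stream the body in 64 KiB chunks so the full video never sits in memory.
    with requests.get(video_url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        with open(file_name, "wb") as fp:
            for chunk in resp.iter_content(chunk_size=1 << 16):
                fp.write(chunk)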
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 6 | 0 |
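A follow-up sketch (hedged: the function name and the 0.7 exponent are assumptions in the spirit of XLM-style smoothing, not something this script does): downstream, such token counts are typically turned into smoothed masking probabilities proportional to count ** -alpha.

import numpy as np


def smooth_token_probs(counts, alpha=0.7):
    # Clamp zero counts to 1 so unseen tokens keep a small, finite probability.
    freqs = np.maximum(np.asarray(counts, dtype=np.float64), 1.0)
    probs = freqs**-alpha
    return probs / probs.sum()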
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version() -> None:
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
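# Added shape note (a sanity check, not part of the original module): for a
# 3x32x64 image and 16x16 patches, torch_extract_patches returns a grid of
# flattened patches of shape (1, 2, 4, 16 * 16 * 3) == (1, 2, 4, 768), i.e.
#   patches = torch_extract_patches(torch.randn(3, 32, 64), 16, 16)
#   assert patches.shape == (1, 2, 4, 768)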
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image: np.ndarray, header: str, **kwargs) -> np.ndarray:
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """
    Constructs a Pix2Struct image processor.
    """

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
| 2 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 0 |
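Usage note added for context: with the `_LazyModule` pattern above, importing the package is cheap, and the torch- or flax-backed symbols are only resolved on first attribute access. A minimal sketch:

import importlib

mod = importlib.import_module("transformers.models.speech_encoder_decoder")
# The heavy modeling module is imported here, at attribute-access time:
model_cls = getattr(mod, "SpeechEncoderDecoderModel")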
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception('You need to set maximum flow algorithm before.')
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception('You should execute algorithm before using its result!')

        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        # the height of the source is fixed at the number of vertices
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"""maximum flow is {maximum_flow}""")
| 3 |
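Added sanity note (not in the original demo): the only augmenting path in the graph above is 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, so the bottleneck edge 1 -> 2 fixes the answer.

# With entrances=[0], exits=[3] and the capacity matrix above:
# assert maximum_flow == 6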
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor | 6 | 0 |
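A usage sketch added for context (the checkpoint is the standard public OwlViT checkpoint; treat the snippet as illustrative rather than part of the original module):

import requests
from PIL import Image

from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
# inputs now carries input_ids, attention_mask and pixel_values for the model.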
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
    '''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """
    Output of VQModel encoding method.
    """

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )
@apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)
    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec) | 6 | 0 |
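A minimal round-trip sketch added for context (default config values assumed; with the default single-block encoder/decoder the spatial size is preserved):

import torch

model = VQModel()
x = torch.randn(1, 3, 32, 32)
out = model(x).sample
assert out.shape == x.shape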
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f'Difference between torch and flax is {diff} (>= {tol}).')

    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 5 |
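An added usage sketch for the API exercised above (the tiny test checkpoints are assumed to be available on the Hub):

from transformers import TFVisionTextDualEncoderModel

model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
    "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
)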
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the answer category.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
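# Added numeric check (illustrative, not part of the original script): for
# logits = jnp.array([[2.0, 0.5]]) and labels = jnp.array([0]), the one-hot
# cross-entropy above reduces to -log_softmax(logits)[0, 0] ~= 0.2014.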
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class UpperCamelCase_ ( train_state.TrainState ):
lowerCamelCase_ = struct.field(pytree_node=False )
@dataclass
class UpperCamelCase_ :
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = None
def _snake_case ( self :List[Any] , __A :str , __A :str , __A :str , __A :Tuple=None ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = model.params
SCREAMING_SNAKE_CASE__ = TrainState.create(
apply_fn=model.__call__ , params=__A , tx=__A , loss_fn=__A , )
if ckpt_dir is not None:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = restore_checkpoint(__A , __A )
SCREAMING_SNAKE_CASE__ = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = build_tx(**__A )
SCREAMING_SNAKE_CASE__ = train_state.TrainState(
step=__A , apply_fn=model.__call__ , params=__A , tx=__A , opt_state=__A , )
SCREAMING_SNAKE_CASE__ = args
SCREAMING_SNAKE_CASE__ = data_collator
SCREAMING_SNAKE_CASE__ = lr
SCREAMING_SNAKE_CASE__ = params
SCREAMING_SNAKE_CASE__ = jax_utils.replicate(__A )
return state
def _snake_case ( self :Optional[Any] , __A :Optional[int] , __A :int , __A :int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.args
SCREAMING_SNAKE_CASE__ = len(__A ) // args.batch_size
SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ = jax.random.split(__A , jax.device_count() )
for epoch in range(args.max_epochs ):
SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.float32 )
SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , args.batch_size , seed=__A )
SCREAMING_SNAKE_CASE__ = 0
for batch in tqdm(__A , total=__A , desc=f'''Running EPOCH-{epoch}''' ):
SCREAMING_SNAKE_CASE__ = self.data_collator(__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.train_step_fn(__A , __A , **__A )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(state.step )
SCREAMING_SNAKE_CASE__ = running_loss.item() / i
SCREAMING_SNAKE_CASE__ = self.scheduler_fn(state_step - 1 )
SCREAMING_SNAKE_CASE__ = self.evaluate(__A , __A )
SCREAMING_SNAKE_CASE__ = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(__A ) )
self.logger.log(__A , commit=__A )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=__A )
def _snake_case ( self :List[str] , __A :Dict , __A :str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , self.args.batch_size )
SCREAMING_SNAKE_CASE__ = len(__A ) // self.args.batch_size
SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.float32 )
SCREAMING_SNAKE_CASE__ = 0
for batch in tqdm(__A , total=__A , desc="""Evaluating ... """ ):
SCREAMING_SNAKE_CASE__ = self.data_collator(__A )
SCREAMING_SNAKE_CASE__ = self.val_step_fn(__A , **__A )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
def _snake_case ( self :List[Any] , __A :Any , __A :Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(__A )
print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... """ )
self.model_save_fn(__A , params=state.params )
with open(os.path.join(__A , """opt_state.msgpack""" ) , """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(__A , """args.joblib""" ) )
joblib.dump(self.data_collator , os.path.join(__A , """data_collator.joblib""" ) )
with open(os.path.join(__A , """training_state.json""" ) , """w""" ) as f:
json.dump({"""step""": state.step.item()} , __A )
print("""DONE""" )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ):
print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=""" ... """ )
with open(os.path.join(UpperCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ = from_bytes(state.params , f.read() )
with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ = from_bytes(state.opt_state , f.read() )
SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """args.joblib""" ) )
SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) )
with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """r""" ) as f:
SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ):
SCREAMING_SNAKE_CASE__ = num_train_steps - warmup_steps
SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=UpperCamelCase__ , transition_steps=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=1e-7 , transition_steps=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple ):
def weight_decay_mask(UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = traverse_util.flatten_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = scheduler_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = optax.adamw(learning_rate=UpperCamelCase__ , weight_decay=UpperCamelCase__ , mask=UpperCamelCase__ )
return tx, lr | 6 | 0 |
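# Hedged sketch: the warmup + linear-decay schedule assembled above, isolated with
# toy step counts (100 warmup / 900 decay are illustrative, not values from the run).
# optax.linear_schedule and optax.join_schedules are the same optax APIs used above.
import optax

warmup_fn = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100)
decay_fn = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=900)
lr_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[100])
print(lr_fn(0), lr_fn(100), lr_fn(1000))  # 0.0 -> 3e-5 at the boundary -> ~1e-7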
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class lowercase_ ( PretrainedConfig ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = '''xmod'''
def __init__( self : str , _UpperCAmelCase : Optional[Any]=30_522 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : int=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Dict=3_072 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : Any=1e-12 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : int=False , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Tuple=("en_XX",) , _UpperCAmelCase : List[str]=None , **_UpperCAmelCase : Optional[Any] , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = classifier_dropout
_A = pre_norm
_A = adapter_reduction_factor
_A = adapter_layer_norm
_A = adapter_reuse_layer_norm
_A = ln_before_adapter
_A = list(_UpperCAmelCase )
_A = default_language
class lowercase_ ( OnnxConfig ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : Dict ):
if self.task == "multiple-choice":
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
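# Sanity sketch of what the `inputs` property above evaluates to for the default
# task (the multiple-choice branch adds a "choice" axis at position 1); this only
# mirrors the logic above, it is not an extra API.
_axes = {0: 'batch', 1: 'sequence'}
_expected = OrderedDict([('input_ids', _axes), ('attention_mask', _axes)])
print(list(_expected))  # ['input_ids', 'attention_mask'] — the input order the ONNX exporter sees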
| 7 |
from torch import nn
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' ) | 6 | 0 |
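# Usage sketch for the selector above (SCREAMING_SNAKE_CASE__ is the name this
# snippet actually defines; "swish" and "silu" intentionally map to one module).
import torch

_act = SCREAMING_SNAKE_CASE__('silu')
print(_act(torch.tensor([-1.0, 0.0, 1.0])))  # tensor([-0.2689, 0.0000, 0.7311])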
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _lowerCAmelCase ( method : Optional[Any] ) -> Union[str, Any]:
if not is_accelerate_available():
return method
accelerate_version = version.parse(accelerate.__version__ ).base_version
if version.parse(accelerate_version ) < version.parse('0.17.0' ):
return method
def wrapper(self : Optional[int] , *args , **kwargs ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *args , **kwargs )
return wrapper | 8 |
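# Toy check of the decorator above (hypothetical hook/model classes; _ToyHook
# stands in for accelerate's real _hf_hook object):
class _ToyHook:
    def pre_forward(self, module):
        print('pre_forward fired')

class _ToyModel:
    _hf_hook = _ToyHook()

    @_lowerCAmelCase  # the version-gated wrapper defined above
    def forward(self, x):
        return x + 1

print(_ToyModel().forward(1))  # with accelerate >= 0.17.0: "pre_forward fired", then 2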
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
SCREAMING_SNAKE_CASE__ = MaskFormerConfig(backbone_config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
SCREAMING_SNAKE_CASE__ = 847
SCREAMING_SNAKE_CASE__ = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
SCREAMING_SNAKE_CASE__ = 150
SCREAMING_SNAKE_CASE__ = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
SCREAMING_SNAKE_CASE__ = 171
SCREAMING_SNAKE_CASE__ = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
SCREAMING_SNAKE_CASE__ = 133
SCREAMING_SNAKE_CASE__ = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
SCREAMING_SNAKE_CASE__ = 19
SCREAMING_SNAKE_CASE__ = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
SCREAMING_SNAKE_CASE__ = 65
SCREAMING_SNAKE_CASE__ = """mapillary-vistas-id2label.json"""
SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE__ = {int(k ): v for k, v in id2label.items()}
return config
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] ):
SCREAMING_SNAKE_CASE__ = dct.pop(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = val
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE__ = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE__ = in_proj_bias[-dim :]
# fmt: on
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ):
# fmt: off
SCREAMING_SNAKE_CASE__ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size]
SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2]
SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size]
SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2]
SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :]
SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :]
# fmt: on
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: bool = False ):
SCREAMING_SNAKE_CASE__ = get_maskformer_config(UpperCamelCase__ )
# load original state_dict
with open(UpperCamelCase__ , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ = pickle.load(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
SCREAMING_SNAKE_CASE__ = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config )
read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# update to torch tensors
for key, value in state_dict.items():
SCREAMING_SNAKE_CASE__ = torch.from_numpy(UpperCamelCase__ )
# load 🤗 model
SCREAMING_SNAKE_CASE__ = MaskFormerForInstanceSegmentation(UpperCamelCase__ )
model.eval()
for name, param in model.named_parameters():
print(UpperCamelCase__ , param.shape )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(UpperCamelCase__ , strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(UpperCamelCase__ ) == 0, f'''Unexpected keys: {unexpected_keys}'''
# verify results
SCREAMING_SNAKE_CASE__ = prepare_img()
if "vistas" in model_name:
SCREAMING_SNAKE_CASE__ = 65
elif "cityscapes" in model_name:
SCREAMING_SNAKE_CASE__ = 65_535
else:
SCREAMING_SNAKE_CASE__ = 255
SCREAMING_SNAKE_CASE__ = True if """ade""" in model_name else False
SCREAMING_SNAKE_CASE__ = MaskFormerImageProcessor(ignore_index=UpperCamelCase__ , reduce_labels=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = image_processor(UpperCamelCase__ , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ = model(**UpperCamelCase__ )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
Path(UpperCamelCase__ ).mkdir(exist_ok=True )
model.save_pretrained(UpperCamelCase__ )
image_processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(f'''nielsr/{model_name}''' )
image_processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_lowerCamelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 6 | 0 |
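# Tiny self-contained check of the rename mechanics used above: rename_key pops
# the source key and re-inserts its value under the destination key (toy dict
# standing in for a real checkpoint state_dict; keys taken from the table above).
_toy = {'sem_seg_head.mask_features.bias': 0.5}
_val = _toy.pop('sem_seg_head.mask_features.bias')
_toy['model.pixel_level_module.decoder.mask_projection.bias'] = _val
assert list(_toy) == ['model.pixel_level_module.decoder.mask_projection.bias']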
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling( num : int , den : int ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def fraction_list( digit_len : int ) -> list[str]:
solutions = []
den = 11
last_digit = int('1' + '0' * digit_len )
for num in range(den , last_digit ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(num , den ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
den = 10
return solutions
def solution( digit_len : int = 2 ) -> int:
result = 1.0
for fraction in fraction_list(digit_len ):
frac = Fraction(fraction )
result *= frac.denominator / frac.numerator
return int(result )
if __name__ == "__main__":
print(solution())
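# Quick sanity check of the predicate above: 49/98 "cancels" the shared 9 and
# still equals 4/8, while 12/34 shares no such digit pattern.
assert is_digit_cancelling(49, 98)
assert not is_digit_cancelling(12, 34)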
| 9 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
_lowerCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :str , __A :str=chr(CLS ) , __A :str=chr(SEP ) , __A :Dict=chr(SEP ) , __A :str=chr(CLS ) , __A :Union[str, Any]=chr(PAD ) , __A :str=chr(MASK ) , __A :int=False , __A :int=2048 , **__A :Dict , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , model_max_length=__A , **__A , )
# Creates a mapping for looking up the IDs of special symbols.
SCREAMING_SNAKE_CASE__ = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
SCREAMING_SNAKE_CASE__ = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
SCREAMING_SNAKE_CASE__ = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
SCREAMING_SNAKE_CASE__ = UNICODE_VOCAB_SIZE
SCREAMING_SNAKE_CASE__ = len(self._special_codepoints )
@property
def _snake_case ( self :Optional[Any] ) -> int:
"""simple docstring"""
return self._unicode_vocab_size
def _snake_case ( self :Tuple , __A :str ) -> List[str]:
"""simple docstring"""
return list(__A )
def _snake_case ( self :Optional[Any] , __A :str ) -> int:
"""simple docstring"""
try:
return ord(__A )
except TypeError:
raise ValueError(f'''invalid token: \'{token}\'''' )
def _snake_case ( self :str , __A :int ) -> str:
"""simple docstring"""
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(__A )
except TypeError:
raise ValueError(f'''invalid id: {index}''' )
def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Any:
"""simple docstring"""
return "".join(__A )
def _snake_case ( self :Optional[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def _snake_case ( self :List[Any] , __A :List[int] , __A :Optional[List[int]] = None , __A :bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = [1] + ([0] * len(__A )) + [1]
if token_ids_a is not None:
result += ([0] * len(__A )) + [1]
return result
def _snake_case ( self :List[str] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def _snake_case ( self :int , __A :str , __A :Optional[str] = None ) -> Any:
"""simple docstring"""
return () | 6 | 0 |
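# Minimal sketch of the character-level scheme above: ordinary token ids are raw
# Unicode codepoints, so tokenize/convert round-trips via list()/ord()/chr() and
# no merges table or vocab file is needed.
_sample = 'hello'
_ids = [ord(c) for c in _sample]
assert ''.join(chr(i) for i in _ids) == _sample
assert _ids == [104, 101, 108, 108, 111]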
_lowerCAmelCase = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 10 |
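# Hedged end-to-end sketch of the API re-exported above, on toy data (the linear
# model and random dataset are placeholders, not part of this package):
import torch
from torch.utils.data import DataLoader, TensorDataset

accelerator = Accelerator()
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loader = DataLoader(TensorDataset(torch.randn(8, 4), torch.randn(8, 1)), batch_size=4)
model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
for x, y in loader:
    loss = torch.nn.functional.mse_loss(model(x), y)
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()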
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
_lowerCamelCase = 'bert-base-cased'
FP16 = 'fp16'
BF16 = 'bf16'
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class UpperCamelCase_ ( UpperCamelCase__ ):
def _snake_case ( self :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def _snake_case ( self :List[Any] ) -> Tuple:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = f'''{i + 1}'''
SCREAMING_SNAKE_CASE__ = strategy
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def _snake_case ( self :int ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = prefetch_policy
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _snake_case ( self :List[str] ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = state_dict_type
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(__A )
for policy in FSDP_AUTO_WRAP_POLICY:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = policy
if policy == "TRANSFORMER_BASED_WRAP":
SCREAMING_SNAKE_CASE__ = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
SCREAMING_SNAKE_CASE__ = """2000"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__A )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = """TRANSFORMER_BASED_WRAP"""
SCREAMING_SNAKE_CASE__ = """T5Layer"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
with self.assertRaises(__A ) as cm:
fsdp_plugin.set_auto_wrap_policy(__A )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = """SIZE_BASED_WRAP"""
SCREAMING_SNAKE_CASE__ = """0"""
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__A )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _snake_case ( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = mp_dtype
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = Accelerator()
if mp_dtype == "fp16":
SCREAMING_SNAKE_CASE__ = torch.floataa
elif mp_dtype == "bf16":
SCREAMING_SNAKE_CASE__ = torch.bfloataa
SCREAMING_SNAKE_CASE__ = MixedPrecision(param_dtype=__A , reduce_dtype=__A , buffer_dtype=__A )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __A )
if mp_dtype == FP16:
self.assertTrue(isinstance(accelerator.scaler , ShardedGradScaler ) )
elif mp_dtype == BF16:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__A )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = str(__A ).lower()
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) )
@require_fsdp
@require_multi_gpu
@slow
class UpperCamelCase_ ( UpperCamelCase__ ):
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = 0.82
SCREAMING_SNAKE_CASE__ = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
SCREAMING_SNAKE_CASE__ = {
"""multi_gpu_fp16""": 3200,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
SCREAMING_SNAKE_CASE__ = 160
SCREAMING_SNAKE_CASE__ = 160
SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def _snake_case ( self :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_performance.py""" )
SCREAMING_SNAKE_CASE__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
SCREAMING_SNAKE_CASE__ = cmd.copy()
for i, strategy in enumerate(__A ):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
def _snake_case ( self :Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
SCREAMING_SNAKE_CASE__ = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
SCREAMING_SNAKE_CASE__ = len(__A )
for state_dict_type in FSDP_STATE_DICT_TYPE:
SCREAMING_SNAKE_CASE__ = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
SCREAMING_SNAKE_CASE__ = cmd_config[:-1]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
def _snake_case ( self :Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
SCREAMING_SNAKE_CASE__ = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
SCREAMING_SNAKE_CASE__ = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(__A ):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() ) | 6 | 0 |
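# For reference, the slow tests above end up shelling out commands of roughly this
# shape (flag values illustrative; every flag appears in the code above):
_example_cmd = [
    'accelerate', 'launch', '--num_processes=2', '--num_machines=1',
    '--machine_rank=0', '--use_fsdp', '--mixed_precision=fp16',
    '--fsdp_sharding_strategy=1', '--fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP',
    '--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
print(' '.join(_example_cmd))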
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __A :
'''simple docstring'''
data : int
next_node : Node | None
class __A :
'''simple docstring'''
def __init__(self , ints ) -> None:
"""simple docstring"""
self.head = None
for i in sorted(ints , reverse=True ):
self.head = Node(i , self.head )
def __iter__(self ) -> Iterator[int]:
"""simple docstring"""
node = self.head
while node:
yield node.data
node = node.next_node
def __len__(self ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __str__(self ) -> str:
"""simple docstring"""
return " -> ".join([str(A ) for node in self] )
def merge_lists(sll_one , sll_two ):
"""simple docstring"""
return SortedLinkedList(list(sll_one ) + list(sll_two ))
if __name__ == "__main__":
import doctest
doctest.testmod()
SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
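# One more merge check beyond the call above: the result stays sorted and
# supports the len()/str() protocol defined on the class.
merged = merge_lists(SSL((3, 1, 2)), SSL((0, 4)))
assert len(merged) == 5
assert str(merged) == '0 -> 1 -> 2 -> 3 -> 4'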
| 11 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
_lowerCamelCase = logging.get_logger(__name__)
# General docstring
_lowerCamelCase = 'PoolFormerConfig'
# Base docstring
_lowerCamelCase = 'sail/poolformer_s12'
_lowerCamelCase = [1, 512, 7, 7]
# Image classification docstring
_lowerCamelCase = 'sail/poolformer_s12'
_lowerCamelCase = 'tabby, tabby cat'
_lowerCamelCase = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: float = 0.0 , UpperCamelCase__: bool = False ):
if drop_prob == 0.0 or not training:
return input
SCREAMING_SNAKE_CASE__ = 1 - drop_prob
SCREAMING_SNAKE_CASE__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
SCREAMING_SNAKE_CASE__ = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
SCREAMING_SNAKE_CASE__ = input.div(UpperCamelCase__ ) * random_tensor
return output
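# Worked numbers for the op above (comment-only, to keep this module unchanged):
# with drop_prob = 0.2 in training mode, keep_prob = 0.8; each sample's whole
# residual branch survives with p = 0.8 and survivors are rescaled by
# 1 / 0.8 = 1.25, so E[output] = 0.8 * (input / 0.8) + 0.2 * 0 = input —
# stochastic depth preserves the expectation.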
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Optional[Any] , __A :Optional[float] = None ) -> None:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = drop_prob
def _snake_case ( self :Any , __A :torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
return drop_path(__A , self.drop_prob , self.training )
def _snake_case ( self :Dict ) -> str:
"""simple docstring"""
return "p={}".format(self.drop_prob )
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Dict , __A :Optional[Any] , __A :Dict , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any]=None ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size)
SCREAMING_SNAKE_CASE__ = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride)
SCREAMING_SNAKE_CASE__ = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding)
SCREAMING_SNAKE_CASE__ = nn.Conv2d(__A , __A , kernel_size=__A , stride=__A , padding=__A )
SCREAMING_SNAKE_CASE__ = norm_layer(__A ) if norm_layer else nn.Identity()
def _snake_case ( self :Dict , __A :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.projection(__A )
SCREAMING_SNAKE_CASE__ = self.norm(__A )
return embeddings
class UpperCamelCase_ ( nn.GroupNorm ):
def __init__( self :Dict , __A :Tuple , **__A :Union[str, Any] ) -> Dict:
"""simple docstring"""
super().__init__(1 , __A , **__A )
class UpperCamelCase_ ( nn.Module ):
def __init__( self :List[str] , __A :Optional[int] ) -> Any:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.AvgPool2d(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A )
def _snake_case ( self :Any , __A :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return self.pool(__A ) - hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Optional[Any] , __A :Tuple , __A :Dict , __A :int , __A :Any ) -> str:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Conv2d(__A , __A , 1 )
SCREAMING_SNAKE_CASE__ = nn.Conv2d(__A , __A , 1 )
SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A )
if isinstance(config.hidden_act , __A ):
SCREAMING_SNAKE_CASE__ = ACT2FN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE__ = config.hidden_act
def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.conv1(__A )
SCREAMING_SNAKE_CASE__ = self.act_fn(__A )
SCREAMING_SNAKE_CASE__ = self.drop(__A )
SCREAMING_SNAKE_CASE__ = self.conv2(__A )
SCREAMING_SNAKE_CASE__ = self.drop(__A )
return hidden_states
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Any , __A :str , __A :List[str] , __A :Tuple , __A :Dict , __A :Union[str, Any] , __A :int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = PoolFormerPooling(__A )
SCREAMING_SNAKE_CASE__ = PoolFormerOutput(__A , __A , __A , __A )
SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A )
SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A )
# Useful for training neural nets
SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity()
SCREAMING_SNAKE_CASE__ = config.use_layer_scale
if config.use_layer_scale:
SCREAMING_SNAKE_CASE__ = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
def _snake_case ( self :Optional[Any] , __A :Optional[int] ) -> str:
"""simple docstring"""
if self.use_layer_scale:
SCREAMING_SNAKE_CASE__ = self.pooling(self.before_norm(__A ) )
SCREAMING_SNAKE_CASE__ = self.layer_scale_1.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A )
SCREAMING_SNAKE_CASE__ = ()
SCREAMING_SNAKE_CASE__ = self.output(self.after_norm(__A ) )
SCREAMING_SNAKE_CASE__ = self.layer_scale_2.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A )
SCREAMING_SNAKE_CASE__ = (output,) + outputs
return outputs
else:
SCREAMING_SNAKE_CASE__ = self.drop_path(self.pooling(self.before_norm(__A ) ) )
# First residual connection
SCREAMING_SNAKE_CASE__ = pooling_output + hidden_states
SCREAMING_SNAKE_CASE__ = ()
# Second residual connection inside the PoolFormerOutput block
SCREAMING_SNAKE_CASE__ = self.drop_path(self.output(self.after_norm(__A ) ) )
SCREAMING_SNAKE_CASE__ = hidden_states + layer_output
SCREAMING_SNAKE_CASE__ = (output,) + outputs
return outputs
class UpperCamelCase_ ( nn.Module ):
def __init__( self :Union[str, Any] , __A :List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = config
# stochastic depth decay rule
SCREAMING_SNAKE_CASE__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
SCREAMING_SNAKE_CASE__ = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A )
# Transformer blocks
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
SCREAMING_SNAKE_CASE__ = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__A ) )
SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A )
def _snake_case ( self :str , __A :Tuple , __A :Dict=False , __A :Tuple=True ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = () if output_hidden_states else None
SCREAMING_SNAKE_CASE__ = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = layers
# Get patch embeddings from hidden_states
SCREAMING_SNAKE_CASE__ = embedding_layer(__A )
# Send the embeddings through the blocks
for _, blk in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = blk(__A )
SCREAMING_SNAKE_CASE__ = layer_outputs[0]
if output_hidden_states:
SCREAMING_SNAKE_CASE__ = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A )
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = PoolFormerConfig
lowerCamelCase_ = "poolformer"
lowerCamelCase_ = "pixel_values"
lowerCamelCase_ = True
def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict:
"""simple docstring"""
if isinstance(__A , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any:
"""simple docstring"""
if isinstance(__A , __A ):
SCREAMING_SNAKE_CASE__ = value
_lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :Union[str, Any] , __A :Any ) -> int:
"""simple docstring"""
super().__init__(__A )
SCREAMING_SNAKE_CASE__ = config
SCREAMING_SNAKE_CASE__ = PoolFormerEncoder(__A )
# Initialize weights and apply final processing
self.post_init()
def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
SCREAMING_SNAKE_CASE__ = self.encoder(
__A , output_hidden_states=__A , return_dict=__A , )
SCREAMING_SNAKE_CASE__ = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , )
class UpperCamelCase_ ( nn.Module ):
def __init__( self :int , __A :Optional[int] ) -> Tuple:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size )
def _snake_case ( self :List[Any] , __A :Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.dense(__A )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , )
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :str , __A :Union[str, Any] ) -> int:
"""simple docstring"""
super().__init__(__A )
SCREAMING_SNAKE_CASE__ = config.num_labels
SCREAMING_SNAKE_CASE__ = PoolFormerModel(__A )
# Final norm
SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
SCREAMING_SNAKE_CASE__ = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _snake_case ( self :int , __A :Optional[torch.FloatTensor] = None , __A :Optional[torch.LongTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ = self.poolformer(
__A , output_hidden_states=__A , return_dict=__A , )
SCREAMING_SNAKE_CASE__ = outputs[0]
SCREAMING_SNAKE_CASE__ = self.classifier(self.norm(__A ).mean([-2, -1] ) )
SCREAMING_SNAKE_CASE__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE__ = """single_label_classification"""
else:
SCREAMING_SNAKE_CASE__ = """multi_label_classification"""
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE__ = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE__ = CrossEntropyLoss()
SCREAMING_SNAKE_CASE__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE__ = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A )
if not return_dict:
SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states ) | 6 | 0 |
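# A minimal usage sketch for the classification model defined above, assuming
# the public `transformers` PoolFormer API (the class names below are the
# library's, not the obfuscated ones in this file); the label count is an
# illustrative assumption.
import torch
from transformers import PoolFormerConfig, PoolFormerForImageClassification

config = PoolFormerConfig(num_labels=10)
model = PoolFormerForImageClassification(config)
model.eval()
pixel_values = torch.randn(1, 3, 224, 224)  # (batch, channels, height, width)
with torch.no_grad():
    logits = model(pixel_values=pixel_values).logits
print(logits.shape)  # torch.Size([1, 10])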
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCamelCase__ : str = logging.get_logger(__name__)
class _snake_case ( UpperCAmelCase_ ):
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
| 12 |
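# The deprecated wrapper above only forwards to YolosImageProcessor with a
# FutureWarning; a sketch of the recommended direct usage, assuming the public
# `transformers` API (the checkpoint name is an assumption for illustration):
from transformers import YolosImageProcessor

image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")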
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_lengths
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = gelu_activation
SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings
SCREAMING_SNAKE_CASE__ = causal
SCREAMING_SNAKE_CASE__ = asm
SCREAMING_SNAKE_CASE__ = n_langs
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = n_special
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = summary_type
SCREAMING_SNAKE_CASE__ = use_proj
SCREAMING_SNAKE_CASE__ = scope
def _snake_case ( self :Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float()
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self :List[str] ) -> Optional[int]:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _snake_case ( self :Tuple , __A :str , __A :int , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[int] , __A :Union[str, Any] , __A :Union[str, Any] , __A :str , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertModel(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , lengths=__A , langs=__A )
SCREAMING_SNAKE_CASE__ = model(__A , langs=__A )
SCREAMING_SNAKE_CASE__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self :str , __A :Any , __A :str , __A :Union[str, Any] , __A :Optional[Any] , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[Any] , __A :Union[str, Any] , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertWithLMHeadModel(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self :Tuple , __A :Union[str, Any] , __A :Optional[Any] , __A :Dict , __A :Dict , __A :Union[str, Any] , __A :List[str] , __A :Optional[int] , __A :int , __A :str , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnsweringSimple(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self :List[str] , __A :Any , __A :int , __A :Tuple , __A :Optional[Any] , __A :Tuple , __A :Optional[int] , __A :str , __A :int , __A :str , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnswering(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(
__A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , p_mask=__A , )
SCREAMING_SNAKE_CASE__ = model(
__A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , )
((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A )
((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self :Optional[int] , __A :str , __A :Optional[int] , __A :Tuple , __A :Dict , __A :List[str] , __A :Tuple , __A :List[str] , __A :Dict , __A :List[str] , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertForSequenceClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self :Optional[Any] , __A :Optional[Any] , __A :Optional[Any] , __A :List[str] , __A :Optional[Any] , __A :int , __A :Tuple , __A :Optional[int] , __A :Union[str, Any] , __A :Dict , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = FlaubertForTokenClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self :str , __A :Any , __A :Tuple , __A :List[str] , __A :Tuple , __A :Any , __A :int , __A :Dict , __A :List[str] , __A :Tuple , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_choices
SCREAMING_SNAKE_CASE__ = FlaubertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self :Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self :Any , __A :Optional[int] , __A :Optional[int] , __A :Dict , __A :List[Any] , __A :Tuple ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self :Tuple , __A :List[str] , __A :Optional[int] , __A :Dict=False ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
SCREAMING_SNAKE_CASE__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
SCREAMING_SNAKE_CASE__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__A , emb_dim=37 )
def _snake_case ( self :int ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self :Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__A )
def _snake_case ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__A )
def _snake_case ( self :str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__A )
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__A )
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__A )
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__A )
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__A )
@slow
def _snake_case ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def _snake_case ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(config=__A )
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A )
SCREAMING_SNAKE_CASE__ = torch.jit.trace(
__A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__A , os.path.join(__A , """traced_model.pt""" ) )
SCREAMING_SNAKE_CASE__ = torch.jit.load(os.path.join(__A , """traced_model.pt""" ) , map_location=__A )
loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _snake_case ( self :Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(__A )[0]
SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __A )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) ) | 6 | 0 |
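# The slow integration test above, restated as a standalone sketch against the
# public `flaubert/flaubert_base_cased` checkpoint it already uses:
import torch
from transformers import FlaubertModel

model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
    last_hidden_state = model(input_ids)[0]
assert last_hidden_state.shape == torch.Size((1, 11, 768))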
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Optional[Any] = logging.get_logger(__name__)
A__ : Dict = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = 'time_series_transformer'
lowerCamelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "student_t" , SCREAMING_SNAKE_CASE_ = "nll" , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = [1, 2, 3, 4, 5, 6, 7] , SCREAMING_SNAKE_CASE_ = "mean" , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 64 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 1_00 , SCREAMING_SNAKE_CASE_ = 0.0_2 , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Any:
# time series specific configuration
__lowerCamelCase : Tuple = prediction_length
__lowerCamelCase : List[str] = context_length or prediction_length
__lowerCamelCase : str = distribution_output
__lowerCamelCase : Any = loss
__lowerCamelCase : List[str] = input_size
__lowerCamelCase : List[str] = num_time_features
__lowerCamelCase : str = lags_sequence
__lowerCamelCase : List[Any] = scaling
__lowerCamelCase : Dict = num_dynamic_real_features
__lowerCamelCase : Optional[Any] = num_static_real_features
__lowerCamelCase : Optional[Any] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__lowerCamelCase : Union[str, Any] = cardinality
else:
__lowerCamelCase : int = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__lowerCamelCase : Optional[int] = embedding_dimension
else:
__lowerCamelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowerCamelCase : List[Any] = num_parallel_samples
# Transformer architecture configuration
__lowerCamelCase : Dict = input_size * len(SCREAMING_SNAKE_CASE_ ) + self._number_of_features
__lowerCamelCase : List[Any] = d_model
__lowerCamelCase : List[str] = encoder_attention_heads
__lowerCamelCase : List[Any] = decoder_attention_heads
__lowerCamelCase : Dict = encoder_ffn_dim
__lowerCamelCase : List[str] = decoder_ffn_dim
__lowerCamelCase : List[str] = encoder_layers
__lowerCamelCase : List[Any] = decoder_layers
__lowerCamelCase : Any = dropout
__lowerCamelCase : str = attention_dropout
__lowerCamelCase : Tuple = activation_dropout
__lowerCamelCase : Tuple = encoder_layerdrop
__lowerCamelCase : Optional[Any] = decoder_layerdrop
__lowerCamelCase : Optional[int] = activation_function
__lowerCamelCase : Dict = init_std
__lowerCamelCase : Any = use_cache
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def lowercase_ ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 13 |
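# A sketch of the feature bookkeeping in the config above, assuming the public
# TimeSeriesTransformerConfig: `feature_size` is input_size * len(lags_sequence)
# plus the embedded static/dynamic features counted by `_number_of_features`.
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(prediction_length=24)
print(len(config.lags_sequence))  # 7 lags by default
print(config.feature_size)        # input_size * 7 + the extra embedded features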
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple=True ):
model.train()
SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = F.mse_loss(UpperCamelCase__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[Any]=False ):
set_seed(42 )
SCREAMING_SNAKE_CASE__ = RegressionModel()
SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 )
model.to(accelerator.device )
if sched:
SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE__ = AdamW(params=ddp_model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 )
SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 )
# Make a copy of `model`
if sched:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ):
# Test when on a single CPU or GPU that the context manager does nothing
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
# Use a single batch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
# Sync grads
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ):
# Test on distributed setup that context manager behaves properly
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
# Use a single batch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
# Sync grads
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=False , UpperCamelCase__: Union[str, Any]=False ):
SCREAMING_SNAKE_CASE__ = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
GradientState._reset_state()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=False ):
SCREAMING_SNAKE_CASE__ = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ ))
if accelerator.num_processes > 1:
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = Accelerator()
SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 )
SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 )
SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if iteration < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if batch_num < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = Accelerator()
SCREAMING_SNAKE_CASE__ = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(UpperCamelCase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(UpperCamelCase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 6 | 0 |
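# A minimal sketch of the gradient-accumulation pattern exercised by the tests
# above, assuming the public `accelerate` API: inside
# `accelerator.accumulate(model)`, gradients are synchronized (and the wrapped
# optimizer actually steps) only every `gradient_accumulation_steps` batches.
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(16, 1), torch.randn(16, 1))
dataloader = DataLoader(dataset, batch_size=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    with accelerator.accumulate(model):
        loss = F.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()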
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a__ = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 14 |
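# The lazy structure above keeps `import transformers` cheap: the symbol below
# resolves through `_LazyModule` on first access, and the torch-backed model
# classes are imported only if torch is available.
from transformers import FalconConfig

config = FalconConfig()
print(config.model_type)  # "falcon"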
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "AutoImageProcessor"
lowerCamelCase_ = "AutoTokenizer"
def __init__( self :Optional[int] , __A :Optional[Any] , __A :Dict ) -> Dict:
"""simple docstring"""
super().__init__(__A , __A )
SCREAMING_SNAKE_CASE__ = self.image_processor
def __call__( self :int , __A :str=None , __A :int=None , __A :Union[str, Any]=None , **__A :str ) -> Optional[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def _snake_case ( self :str , *__A :List[str] , **__A :List[str] ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :List[str] , *__A :Any , **__A :Any ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self :Dict ) -> List[Any]:
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"] | 6 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
A : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
A : List[str] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
A : Any = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
def lowerCamelCase__ (self : Dict ) -> Union[str, Any]:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int = CHRF.CHAR_ORDER , _UpperCAmelCase : int = CHRF.WORD_ORDER , _UpperCAmelCase : int = CHRF.BETA , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , ) -> int:
"""simple docstring"""
lowercase__ = len(references[0] )
if any(len(_UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
lowercase__ = [[refs[i] for refs in references] for i in range(_UpperCAmelCase )]
lowercase__ = CHRF(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowercase__ = sb_chrf.corpus_score(_UpperCAmelCase , _UpperCAmelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 15 |
def find_min(arr: list) -> int:
    """Return the minimum difference between the sums of the two subsets of a partition of `arr`."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i items sums exactly to j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # exclude item i
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # include item i
    # the best split puts as close to half the total as possible on one side
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff | 6 | 0 |
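# Quick sanity checks for find_min above: [1, 6, 11, 5] splits into {1, 5, 6}
# (sum 12) and {11} (sum 11), so the minimum difference is 1.
assert find_min([1, 6, 11, 5]) == 1
assert find_min([3, 1, 4, 2, 2, 1]) == 1  # total 13 splits as 6 + 7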
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : List[Any] = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "data2vec-vision"
def __init__( self : Tuple , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Union[str, Any]=12 , __lowerCamelCase : int=12 , __lowerCamelCase : Tuple=3072 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Optional[int]=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=0.02 , __lowerCamelCase : List[str]=1e-12 , __lowerCamelCase : Union[str, Any]=224 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : str=3 , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : int=False , __lowerCamelCase : str=False , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[Any]=[3, 5, 7, 11] , __lowerCamelCase : Union[str, Any]=[1, 2, 3, 6] , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=0.4 , __lowerCamelCase : Dict=256 , __lowerCamelCase : int=1 , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Tuple=255 , **__lowerCamelCase : List[str] , ):
super().__init__(**__lowerCamelCase )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = use_mask_token
SCREAMING_SNAKE_CASE = use_absolute_position_embeddings
SCREAMING_SNAKE_CASE = use_relative_position_bias
SCREAMING_SNAKE_CASE = use_shared_relative_position_bias
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = use_mean_pooling
# decode head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE = out_indices
SCREAMING_SNAKE_CASE = pool_scales
# auxiliary head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE = use_auxiliary_head
SCREAMING_SNAKE_CASE = auxiliary_loss_weight
SCREAMING_SNAKE_CASE = auxiliary_channels
SCREAMING_SNAKE_CASE = auxiliary_num_convs
SCREAMING_SNAKE_CASE = auxiliary_concat_input
SCREAMING_SNAKE_CASE = semantic_loss_ignore_index
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = version.parse("1.11" )
@property
def _snake_case ( self : str ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self : List[Any] ):
return 1e-4 | 16 |
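# A sketch instantiating the configuration above through the public
# `transformers` API; the argument values mirror its defaults.
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig(image_size=224, patch_size=16)
print(config.hidden_size, config.num_hidden_layers)  # 768 12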
def kinetic_energy(mass: float, velocity: float) -> float:
    """Calculate kinetic energy: 0.5 * mass * |velocity| ** 2.

    >>> kinetic_energy(10, 10)
    500.0
    """
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True) | 6 | 0 |
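# Worked example for kinetic_energy above: the formula uses |velocity|, so the
# result is direction-independent.
assert kinetic_energy(10, 10) == 500.0  # 0.5 * 10 * 10 * 10
assert kinetic_energy(2, -3) == 9.0     # 0.5 * 2 * 3 * 3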
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = '''Wav2Vec2FeatureExtractor'''
_lowercase : Any = '''AutoTokenizer'''
def __init__( self : List[str] , __A : List[Any] , __A : Optional[Any] ):
super().__init__(__A , __A )
__A : Optional[Any] = self.feature_extractor
__A : str = False
@classmethod
def lowerCAmelCase_ ( cls : Dict , __A : List[str] , **__A : Optional[Any] ):
try:
return super().from_pretrained(__A , **__A )
except OSError:
warnings.warn(
F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
""" include a `tokenizer_class` attribute is deprecated and will be """
"""removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
""" attribute to either your `config.json` or `tokenizer_config.json` """
"""file to suppress this warning: """ , __A , )
            __A : Any = Wav2Vec2FeatureExtractor.from_pretrained(__A , **__A )
            __A : List[str] = Wav2Vec2CTCTokenizer.from_pretrained(__A , **__A )
return cls(feature_extractor=__A , tokenizer=__A )
def __call__( self : Optional[int] , *__A : Optional[Any] , **__A : List[str] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
__A : Any = kwargs.pop("""raw_speech""" )
else:
__A : List[str] = kwargs.pop("""audio""" , __A )
__A : int = kwargs.pop("""sampling_rate""" , __A )
__A : Tuple = kwargs.pop("""text""" , __A )
if len(__A ) > 0:
__A : Union[str, Any] = args[0]
__A : Optional[Any] = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
__A : Tuple = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if text is not None:
__A : Any = self.tokenizer(__A , **__A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__A : List[str] = encodings["""input_ids"""]
return inputs
def lowerCAmelCase_ ( self : List[str] , *__A : int , **__A : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__A , **__A )
__A : Any = kwargs.pop("""input_features""" , __A )
__A : Tuple = kwargs.pop("""labels""" , __A )
if len(__A ) > 0:
__A : Any = args[0]
__A : Union[str, Any] = args[1:]
if input_features is not None:
__A : str = self.feature_extractor.pad(__A , *__A , **__A )
if labels is not None:
__A : Union[str, Any] = self.tokenizer.pad(__A , **__A )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__A : Optional[Any] = labels["""input_ids"""]
return input_features
def lowerCAmelCase_ ( self : List[Any] , *__A : List[Any] , **__A : List[str] ):
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase_ ( self : Optional[Any] , *__A : Dict , **__A : Optional[int] ):
return self.tokenizer.decode(*__A , **__A )
@contextmanager
def lowerCAmelCase_ ( self : str ):
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
__A : Dict = True
__A : List[str] = self.tokenizer
yield
__A : Tuple = self.feature_extractor
__A : int = False
| 17 |
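# A hedged usage sketch for the processor above via the public
# Wav2Vec2Processor API; the checkpoint name is an assumption for illustration.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=speech, sampling_rate=16000, return_tensors="pt")
labels = processor(text="HELLO WORLD").input_ids  # transcription as token ids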
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
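# A short, hedged illustration of constructing this config; the checkpoint
# names are assumptions for the example, not taken from this file:
#
#   from transformers import AutoConfig, EncoderDecoderConfig
#
#   encoder = AutoConfig.from_pretrained("bert-base-uncased")
#   decoder = AutoConfig.from_pretrained("gpt2")
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention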
| 6 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
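# A hedged usage sketch: this formatter is normally reached through
# `Dataset.set_format` rather than instantiated directly; the toy dataset is an
# illustrative assumption:
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]})
#   ds.set_format("torch")
#   print(ds[0]["x"])  # tensor([1, 2]), tensorized by the formatter above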
| 18 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 6 | 0 |
"""simple docstring"""
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
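# A tiny worked check of the helper above (hedged as a reading of its intent):
# check_partition_perfect(2) is True, since sqrt(4 * 2 + 1) / 2 + 1 / 2 == 2 and
# log2(2) == 1 exactly; in general k = (2**e - 1) * 2**e passes, e.g. 2, 12, 56.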
if __name__ == "__main__":
print(F"""{solution() = }""")
| 19 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
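# A quick illustration of the mapping implemented above (the key is a made-up
# example in the original Donut naming scheme):
#
#   rename_key("encoder.model.layers.0.blocks.0.mlp.fc1.weight")
#   # -> "encoder.encoder.layers.0.blocks.0.intermediate.dense.weight"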
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")

    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)

| 6 | 0 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy(object):
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock(object):
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
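# A hedged usage sketch of the platform-appropriate lock selected above (the
# file names are illustrative assumptions):
#
#   lock = FileLock("high_ground.txt.lock")
#   with lock:
#       with open("high_ground.txt", "a") as f:
#           f.write("appended under an exclusive file lock")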
| 20 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

| 6 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
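# A hedged sketch of using the same API directly, outside pytest (the paths are
# illustrative assumptions):
#
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")
#   if fmt is not None:
#       Extractor.extract("archive.tar.gz", "out_dir", fmt)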
| 21 |
def solution(n: int = 600_851_475_143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
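# A quick worked check: 13195 = 5 * 7 * 13 * 29, so solution(13195) returns 29;
# the trial division above only needs to run while i * i <= n.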
if __name__ == "__main__":
print(F'''{solution() = }''') | 6 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , ) -> str:
"""simple docstring"""
_a = XLMModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ , lengths=lowerCAmelCase_ , langs=lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , langs=lowerCAmelCase_ )
_a = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , ) -> int:
"""simple docstring"""
_a = XLMWithLMHeadModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , ) -> int:
"""simple docstring"""
_a = XLMForQuestionAnsweringSimple(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
_a = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , ) -> Tuple:
"""simple docstring"""
_a = XLMForQuestionAnswering(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ )
_a = model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , p_mask=lowerCAmelCase_ , )
_a = model(
lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , cls_index=lowerCAmelCase_ , is_impossible=lowerCAmelCase_ , )
((_a) , ) = result_with_labels.to_tuple()
_a = model(lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ )
((_a) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , ) -> Dict:
"""simple docstring"""
_a = XLMForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ )
_a = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , ) -> Optional[Any]:
"""simple docstring"""
_a = self.num_labels
_a = XLMForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , ) -> Any:
"""simple docstring"""
_a = self.num_choices
_a = XLMForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> Optional[int]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int=False ) -> List[Any]:
"""simple docstring"""
_a = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_a = XLMModelTester(self )
_a = ConfigTester(self , config_class=lowerCAmelCase_ , emb_dim=37 )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str=False , lowerCAmelCase_ : str=1 ) -> int:
"""simple docstring"""
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase_ ) )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
_a = min_length + idx + 1
_a = min_length + idx + 1
_a = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int=False , lowerCAmelCase_ : List[str]=1 ) -> Optional[Any]:
"""simple docstring"""
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(
[isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase_ ) , )
self.assertEqual(len(lowerCAmelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase_ ):
# adds PAD dummy token
_a = min_length + idx + 1
_a = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase_ ) , )
pass
@slow
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = XLMModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
def __lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_a = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(lowerCAmelCase_ )
_a = torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president
_a = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_a = model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase_ )
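    # A hedged sketch of the equivalent greedy-generation call outside the test
    # harness, using the same checkpoint name as above:
    #
    #   from transformers import XLMTokenizer, XLMWithLMHeadModel
    #
    #   tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
    #   model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
    #   input_ids = tokenizer("the president", return_tensors="pt").input_ids
    #   output_ids = model.generate(input_ids, do_sample=False)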
| 22 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
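# A hedged note on the helper under test: `is_safetensors_compatible` checks
# that every `.bin` (PyTorch) weight file in a pipeline has a `.safetensors`
# counterpart, e.g.:
#
#   is_safetensors_compatible(["unet/diffusion_pytorch_model.bin",
#                              "unet/diffusion_pytorch_model.safetensors"])  # True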
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

| 6 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 23 |
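# Sketch: driving the converter above from Python rather than the CLI. The
# checkpoint directory and the output path below are placeholders, not real files.
if False:  # illustration only
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        roberta_checkpoint_path="./xlmr.xl",  # a local fairseq XLM-R XL dump
        pytorch_dump_folder_path="./xlm-roberta-xl-converted",
        classification_head=False,  # True only if the checkpoint carries an MNLI head
    )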
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a 'mm-dd-yyyy' or 'mm/dd/yyyy' date,
    computed with Zeller's congruence and cross-checked against datetime."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
args = parser.parse_args()
zeller(args.date_input) | 6 | 0 |
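# Worked example of the congruence above (assumes the zeller() definition in this
# file): for "01-01-2000" the month rolls back to m=13 of y=1999, so c=19, k=99,
# t=int(2.6*13 - 5.39)=28, u=4, v=24, x=100, z=156, w=118 and f=118%7=6 -> Saturday.
if False:  # illustration only
    assert "Saturday" in zeller("01-01-2000")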
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Wraps two `Transformer2DModel`s and blends their outputs during inference."""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm)
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
| 24 |
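# The mixing step in the forward pass above is a plain convex combination of the
# two branch residuals. A standalone sketch (the function name is illustrative,
# not part of the diffusers API):
def blend_residuals(residual_a, residual_b, hidden_states, mix_ratio=0.5):
    # each residual is transformer_i(hidden_states) - hidden_states, as in forward() above
    return hidden_states + residual_a * mix_ratio + residual_b * (1 - mix_ratio)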
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30522, type=int)
args = parser.parse_args()

logger.info(f"Loading data from {args.data_file}")
with open(args.data_file, "rb") as fp:
    data = pickle.load(fp)

logger.info("Counting occurrences for MLM.")
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)

counts = [0] * args.vocab_size
for k, v in counter.items():
    counts[k] = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 6 | 0 |
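# Sketch: the dump produced above holds counts[token_id] = number of occurrences
# in the binarized corpus. Downstream, the distillation code turns this into
# smoothed masking probabilities; the exponent here is purely illustrative.
if False:  # illustration only
    probs = [c**0.7 for c in counts]  # temperature-style smoothing of raw counts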
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with binary doubling (Russian peasant multiplication)."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Multiply a by b modulo c, keeping every intermediate sum reduced mod c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res | 25 |
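# Quick check of the doubling-based multiply above: one addition per set bit of
# b, so O(log b) iterations; the modular variant keeps every sum reduced mod c.
if __name__ == "__main__":
    assert binary_multiply(7, 34) == 238
    assert binary_mod_multiply(7, 34, 10) == (7 * 34) % 10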
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 6 | 0 |
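# Sketch of the lazy-import pattern the module above relies on: nothing heavy is
# imported until an attribute is first accessed. Simplified illustration, not the
# actual transformers._LazyModule implementation.
import importlib

class LazyModuleSketch:
    def __init__(self, package_name, import_structure):
        self._package_name = package_name
        self._import_structure = import_structure  # {submodule: [exported names]}

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self._package_name)
                return getattr(module, attr)  # the real import happens only here
        raise AttributeError(attr)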
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
# We will verify the conversion on a COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"])
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878])
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7")

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True)
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 26 |
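# Sketch: the command line the conversion script above expects; every path is a
# placeholder for a locally downloaded EfficientFormer checkpoint and config.
#
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path ./efficientformer_l1_300d.pth \
#       --config_file ./efficientformer_l1.json \
#       --pytorch_dump_path ./efficientformer-l1 \
#       --no-push_to_hub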
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor | 6 | 0 |
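# Sketch: typical use of the processor above for text-conditioned detection. The
# checkpoint id is the public OWL-ViT base model; the image path is a placeholder.
if False:  # illustration only
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.open("cats.png")
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")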
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='MRA does not output attentions')
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 5_0265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3')
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 5_0265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 27 |
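# Sketch: running just this test module, assuming the usual transformers repo
# layout for the file path.
#
#   python -m pytest tests/models/mra/test_modeling_mra.py -k "test_model" -x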
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec) | 6 | 0 |
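# Sketch of an encode -> quantize -> decode round trip through the model above.
# The config values and tensor shapes are illustrative; quantization happens
# inside decode() unless force_not_quantize is set.
if False:  # illustration only
    vq = VQModel(block_out_channels=(64,), latent_channels=3, num_vq_embeddings=256)
    images = torch.randn(1, 3, 32, 32)
    latents = vq.encode(images).latents  # continuous encoder output
    reconstruction = vq.decode(latents).sample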
'''simple docstring'''
def longest_common_subsequence(x: str, y: str):
    """Find the length of the longest common subsequence of x and y, and also
    return one such subsequence."""
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ''
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
if __name__ == "__main__":
UpperCamelCase_ = "AGGTAB"
UpperCamelCase_ = "GXTXAYB"
UpperCamelCase_ = 4
UpperCamelCase_ = "GTAB"
UpperCamelCase_ , UpperCamelCase_ = longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
| 28 |
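# The DP above fills an (m+1) x (n+1) table, so time and memory are O(m*n); the
# backtracking loop then recovers one optimal subsequence in O(m+n) steps.
if False:  # illustration only
    length, subsequence = longest_common_subsequence("programming", "gaming")
    assert length == 6  # one optimal answer is "gaming"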
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBird QA module with an extra CLS head on top for predicting the answer category.
    This way we can still load the weights of FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq)
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state)
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f'''Running EPOCH-{epoch}'''):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''', state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f'''SAVING CHECKPOINT IN {save_dir}''', end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f'''RESTORING CHECKPOINT FROM {save_dir}''', end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr | 6 | 0 |
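# The schedule built by scheduler_fn above is linear warmup followed by linear
# decay. A tiny standalone illustration of the same optax pattern (the step
# counts and rates here are arbitrary):
if False:  # illustration only
    warmup = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100)
    decay = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=900)
    schedule = optax.join_schedules(schedules=[warmup, decay], boundaries=[100])
    # schedule(0) == 0.0, schedule(100) == 3e-5, then it decays toward 1e-7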
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
    _torch_error = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
    _tf_error = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
    _jax_error = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory that returns a Formatter instance given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 29 |
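# Sketch: resolving a formatter through the registry above. "np" is registered
# as an alias of "numpy", so both spellings resolve to the same formatter class.
if False:  # illustration only
    formatter = get_formatter("np")  # returns a NumpyFormatter instance
    assert get_format_type_from_alias("np") == "numpy"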
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Return an activation module for the given name; raise on unknown names."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''') | 6 | 0 |
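# Sketch: the factory above maps config strings to torch activation modules.
if False:  # illustration only
    import torch

    act = get_activation("silu")  # nn.SiLU()
    y = act(torch.randn(2, 4))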
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    # the original enumerated one branch per weight type (weight, weight_g, weight_v,
    # bias, the LSTM parameters and the batch-norm buffers); a single getattr
    # assignment covers all of them identically
    if weight_type is not None:
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def should_ignore(name, ignore_keys):
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix, suffix = key.split('.*.')
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
logger.info(f'''{name} was ignored''' )
continue
        is_used = False
for key, mapped_key in MAPPING.items():
if "*" in key:
                prefix, suffix = key.split('.*.')
                if prefix in name and suffix in name:
                    key = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
                is_used = True
if "*" in mapped_key:
                    layer_index = name.split(key)[0].split('.')[-2]
                    mapped_key = mapped_key.replace('*', layer_index)
if "weight_g" in name:
UpperCAmelCase_ : List[Any] = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase_ : Tuple = '''weight_v'''
elif "weight_ih_l0" in name:
UpperCAmelCase_ : Optional[int] = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
UpperCAmelCase_ : Optional[int] = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
UpperCAmelCase_ : Dict = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
UpperCAmelCase_ : Any = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
UpperCAmelCase_ : int = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
UpperCAmelCase_ : int = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
UpperCAmelCase_ : Dict = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
UpperCAmelCase_ : Union[str, Any] = '''bias_hh_l1'''
elif "bias" in name:
UpperCAmelCase_ : List[str] = '''bias'''
elif "weight" in name:
UpperCAmelCase_ : int = '''weight'''
elif "running_mean" in name:
UpperCAmelCase_ : Tuple = '''running_mean'''
elif "running_var" in name:
UpperCAmelCase_ : Dict = '''running_var'''
elif "num_batches_tracked" in name:
UpperCAmelCase_ : Tuple = '''num_batches_tracked'''
else:
UpperCAmelCase_ : Dict = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
continue
if not is_used:
            unused_weights.append(name)
logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 30 |
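# Illustrative CLI invocation (a sketch; the flag names come from the parser
# above, while the script filename and paths are placeholder assumptions):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec-24khz-hf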
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path , "rb" ) as f:
        data = pickle.load(f)
    state_dict = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest)
    read_in_swin_q_k_v(state_dict , config.backbone_config)
    read_in_decoder_q_k_v(state_dict , config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
# load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
model.eval()
for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
# verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="pt" )
    outputs = model(**inputs )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(f'''nielsr/{model_name}''' )
image_processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
    help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_lowerCamelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 6 | 0 |
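# Illustrative CLI invocation (a sketch; the flag names come from the parser
# above, while the script filename and paths are placeholder assumptions):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path ./MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade-hf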
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 31 |
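# Expected behaviour (illustrative): with no gates applied before measurement,
# the qubit stays in |0>, so the returned counts should be all-zero outcomes,
# e.g. {'0': 1000} for shots=1000.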
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size
    def _tokenize(self, text: str) -> List[str]:
        return list(text)
    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'" )
    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}" )
    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
return () | 6 | 0 |
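# Usage sketch (illustrative): CANINE tokenizes at the raw character level, so
# ids are simply Unicode codepoints plus the special symbols defined above.
#
#   tokenizer = CanineTokenizer()
#   ids = tokenizer("hi")["input_ids"]  # [CLS codepoint, ord('h'), ord('i'), SEP codepoint]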
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_file):
        self.img = cv2.imread(input_file , 0 )
        self.original_image = copy.deepcopy(self.img )
        x, _, _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
            self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg' , self.img )
    def plot_histogram(self):
        plt.hist(self.img.ravel() , 256 , [0, 256] )
    def show_image(self):
        cv2.imshow('Output-Image' , self.img )
        cv2.imshow('Input-Image' , self.original_image )
        cv2.waitKey(5000 )
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image() | 32 |
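# Note (illustrative): the stretch loop above is histogram equalization -- it
# accumulates the normalized histogram into a CDF (self.sk) and remaps each
# gray level to roughly round((L - 1) * cdf(level)) via self.last_list.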
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = 'bert-base-cased'
FP16 = 'fp16'
BF16 = 'bf16'
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
            env = self.dist_env.copy()
            env['FSDP_SHARDING_STRATEGY'] = f'{i + 1}'
            env['FSDP_SHARDING_STRATEGY_NAME'] = strategy
            with mockenv_context(**env ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH ):
            env = self.dist_env.copy()
            env['FSDP_BACKWARD_PREFETCH'] = prefetch_policy
            with mockenv_context(**env ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch )
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE ):
            env = self.dist_env.copy()
            env['FSDP_STATE_DICT_TYPE'] = state_dict_type
            with mockenv_context(**env ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED )
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env['FSDP_AUTO_WRAP_POLICY'] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env['FSDP_TRANSFORMER_CLS_TO_WRAP'] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env['FSDP_MIN_NUM_PARAMS'] = "2000"
            with mockenv_context(**env ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model )
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy )
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
        env = self.dist_env.copy()
        env['FSDP_AUTO_WRAP_POLICY'] = "TRANSFORMER_BASED_WRAP"
        env['FSDP_TRANSFORMER_CLS_TO_WRAP'] = "T5Layer"
        with mockenv_context(**env ):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception ) as cm:
                fsdp_plugin.set_auto_wrap_policy(model )
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
        env = self.dist_env.copy()
        env['FSDP_AUTO_WRAP_POLICY'] = "SIZE_BASED_WRAP"
        env['FSDP_MIN_NUM_PARAMS'] = "0"
        with mockenv_context(**env ):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model )
            self.assertIsNone(fsdp_plugin.auto_wrap_policy )
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env['ACCELERATE_MIXED_PRECISION'] = mp_dtype
            with mockenv_context(**env ):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype , reduce_dtype=dtype , buffer_dtype=dtype )
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , mp_policy )
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler , ShardedGradScaler ) )
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler )
                AcceleratorState._reset_state(True )
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            env = self.dist_env.copy()
            env['FSDP_OFFLOAD_PARAMS'] = str(flag ).lower()
            with mockenv_context(**env ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=flag ) )
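# Note (illustrative): FullyShardedDataParallelPlugin reads its configuration
# from environment variables (FSDP_SHARDING_STRATEGY, FSDP_OFFLOAD_PARAMS,
# FSDP_AUTO_WRAP_POLICY, ...), which is why the tests above patch the process
# environment with mockenv_context instead of passing constructor arguments.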
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            'fsdp_shard_grad_op_transformer_based_wrap',
            'fsdp_full_shard_transformer_based_wrap',
        ]
        self.peak_memory_usage_upper_bound = {
            'multi_gpu_fp16': 3200,
            'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000,
            'fsdp_full_shard_transformer_based_wrap_fp16': 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] )
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder , 'test_performance.py' )
        cmd = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder , 'test_checkpointing.py' )
        cmd = [
            'accelerate',
            'launch',
            '--num_processes=2',
            '--num_machines=1',
            '--machine_rank=0',
            '--use_fsdp',
            '--mixed_precision=fp16',
            '--fsdp_transformer_layer_cls_to_wrap=BertLayer',
        ]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
            cmd_config = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config )
for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir , 'epoch_0' )
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py' )
        cmd = [
            'accelerate',
            'launch',
            '--num_processes=2',
            '--num_machines=1',
            '--machine_rank=0',
        ]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MBartConfig )
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    expected_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    model_name = 'facebook/mbart-large-en-ro'
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        self.assertListEqual(self.expected_text , generated_words )
    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
        return generated_words
    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 33 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'PoolFormerConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'sail/poolformer_s12'
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'sail/poolformer_s12'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob ) * random_tensor
    return output
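# Worked note (illustrative): dividing the kept activations by keep_prob makes
# stochastic depth unbiased in expectation, since
# E[output] = keep_prob * (input / keep_prob) + (1 - keep_prob) * 0 = input,
# so no rescaling is needed at inference time.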
class PoolFormerDropPath(nn.Module):
    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states , self.drop_prob , self.training )
    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob )
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with an optional normalization layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group. Input: tensor of shape [batch_size, num_channels, *]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
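
# Illustrative note (not part of the original module): PoolFormer replaces attention with
# average pooling as its token mixer. Subtracting the input leaves only the neighborhood
# difference, since the block's residual connection adds the input back. A sketch with
# assumed shapes:
#
#   pooling = PoolFormerPooling(pool_size=3)
#   x = torch.randn(1, 64, 14, 14)
#   mixed = pooling(x)  # same shape as x, equal to AvgPool2d(x) - x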
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
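
# Illustrative note (not part of the original module): with layer scale enabled, each
# residual branch is multiplied channel-wise by a learned vector initialized at
# `config.layer_scale_init_value`, i.e. x + drop_path(lambda * f(x)), where lambda has
# shape (num_channels, 1, 1) after the two unsqueeze(-1) calls above.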
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = R'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
POOLFORMER_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
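
# Hedged usage sketch (assumes the `sail/poolformer_s12` checkpoint can be downloaded
# and `image` is a PIL image):
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = image_processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   predicted_label = model.config.id2label[logits.argmax(-1).item()]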
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
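
# Example invocation (the script filename and checkpoint paths below are hypothetical,
# shown for illustration only):
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sts-b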
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_case_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
def solution(n: int = 1000) -> int:
    """
    Return the sum of all multiples of 3 or 5 below n (Project Euler problem 1).

    >>> solution()
    233168
    >>> solution(10)
    23
    """
    result = 0
    for a in range(3, n):
        if a % 3 == 0 or a % 5 == 0:
            result += a
    return result
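
# Consistency check (standard arithmetic, not part of the original file): the same sum has a
# closed form via inclusion-exclusion. With S(k) = k * m * (m + 1) / 2 where m = (n - 1) // k,
# the answer is S(3) + S(5) - S(15); for n = 1000 this gives 233168, matching the doctest above.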
if __name__ == "__main__":
print(F'''{solution() = }''')
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
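
# Illustrative sketch (not part of the original test; `compute_loss` is a stand-in): the
# pattern being verified is the standard manual gradient-accumulation idiom, where
# `no_sync` suppresses the DDP all-reduce on the accumulation steps:
#
#   for step, batch in enumerate(dataloader):
#       if step % 2 == 0:
#           with accelerator.no_sync(model):
#               accelerator.backward(compute_loss(model, batch))
#       else:
#           accelerator.backward(compute_loss(model, batch))  # this step syncs grads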
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
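
# Illustrative sketch (not part of the original test): `accelerator.accumulate` wraps the
# same idiom declaratively. With gradient_accumulation_steps=2 it only syncs gradients and
# lets the optimizer step take effect on every second batch (and on the final one):
#
#   with accelerator.accumulate(model):
#       accelerator.backward(loss)
#       optimizer.step()
#       optimizer.zero_grad()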
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            # Retarget each search toward the frontier of the other one
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
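
# Illustrative note (not part of the original file): on a grid with branching factor b and
# solution depth d, plain BFS explores on the order of b^d nodes while the bidirectional
# variant explores roughly b^(d/2) from each side, which is why it tends to be faster in
# the timing comparison below.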
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()

    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ImageTextProcessor(ProcessorMixin):
    # The original class name is not recoverable from this excerpt; this generic name is an
    # editorial placeholder. The structure follows the standard ProcessorMixin conventions.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """
    Pipeline for unconditional image generation using the stochastic sampler of [1] with a
    variance-expanding model. [1] Karras, Tero, et al. "Elucidating the Design Space of
    Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
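
# Hedged usage sketch (assumes a compatible trained checkpoint is available; the path is
# illustrative):
#
#   pipe = KarrasVePipeline.from_pretrained("path/to/karras-ve-checkpoint")
#   images = pipe(batch_size=1, num_inference_steps=50).images
#   images[0].save("sample.png")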
def find_min(arr):
    """
    Partition arr into two subsets such that the difference of their sums is minimized,
    and return that minimum difference.
    """
    n = len(arr)
    s = sum(arr)
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]

    # A sum of 0 is always reachable (via the empty subset); no positive sum is
    # reachable from an empty prefix.
    for i in range(n + 1):
        dp[i][0] = True
    for j in range(1, s + 1):
        dp[0][j] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # exclude arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # include arr[i - 1]

    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
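
# Worked example (standard result, not part of the original file):
#   find_min([1, 6, 11, 5]) == 1, since {1, 5, 6} sums to 12 and {11} sums to 11.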
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
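    # Hedged usage note (paths assumed): `analyze_directory` either imports
    # each file as a `transformers` attribute and runs doctest.DocTestSuite
    # on the module (only_modules=True), or falls back to doctest.testfile
    # on the raw file (only_modules=False), as test_doc_sources does above.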
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy E_k = 0.5 * m * v**2 of a body.

    >>> kinetic_energy(10, 10)
    500.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
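# Hedged worked example (values assumed, not part of the original snippet):
# a 2 kg body moving at 3 m/s has E_k = 0.5 * 2 * 3**2 = 9.0 joules.
assert kinetic_energy(2, 3) == 9.0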
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string.

    >>> binary_or(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
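# Hedged sanity check (example values assumed, not in the original snippet):
# the string result should always agree with Python's built-in `|` operator.
for x, y in [(25, 32), (0, 0), (7, 8)]:
    assert binary_or(x, y) == bin(x | y)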
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
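# Hedged usage sketch (model choice is illustrative): composing two existing
# configs; `from_encoder_decoder_configs` flips the decoder flags before
# building the joint config.
#
#     from transformers import BertConfig
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention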
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
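# Hedged illustration (numbers assumed, not the real contents of num.txt):
# the same "first ten digits of a large sum" logic on in-memory data.
numbers = [
    37107287533902102798797998220837590246510135740250,
    46376937677490009712648124896970078050417018260538,
]
assert str(sum(numbers))[:10] == "8348422521"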
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features: Features) -> "TextClassification":
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema  # frozen dataclass: bypass __setattr__
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
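# Hedged usage sketch (column names and labels assumed): aligning the
# template with a concrete feature set swaps the generic ClassLabel for the
# dataset's own label feature.
#
#     features = Features({"text": Value("string"),
#                          "labels": ClassLabel(names=["neg", "pos"])})
#     template = TextClassification(text_column="text", label_column="labels")
#     aligned = template.align_with_features(features)
#     assert aligned.label_schema["labels"].names == ["neg", "pos"]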