| column | type | range |
|---|---|---|
| code | string | 82 – 54.1k characters |
| code_codestyle | int64 | 0 – 699 |
| style_context | string | 111 – 35.6k characters |
| style_context_codestyle | int64 | 0 – 699 |
| label | int64 | 0 – 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]) -> list:
    """Create dummy inputs (text, image or audio) matching the requested input types."""
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            # Nested input specs are resolved recursively
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List) -> List[str]:
    """Map concrete outputs back to their type names ("text", "image" or "audio")."""
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    """Common checks for tools: declared inputs/outputs, call behaviour and agent types."""

    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
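# A minimal usage sketch of the helpers above (illustrative, not part of the
# test suite itself): `create_inputs` builds dummy payloads and `output_types`
# maps them back to their type names.
#
#     >>> dummy = create_inputs(["text", "audio"])
#     >>> output_types(dummy)
#     ['text', 'audio']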
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the name of the week day for a given date, using the Doomsday algorithm."""
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
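# Usage sketch (dates checked by hand against a calendar):
#
#     >>> get_week_day(2020, 10, 24)
#     'Saturday'
#     >>> get_week_day(2000, 1, 1)
#     'Saturday'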
def capitalize_variants(txt: str) -> list:
    """Return all variants of `txt` in which exactly one alphabetic character
    has been upper-cased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
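# Example (illustrative):
#
#     >>> capitalize_variants("abc")
#     ['Abc', 'aBc', 'abC']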
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Given an 'isbn/XXXXXXXXXX' Open Library id, return the book data as a dict."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding="max_length", return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0  # negative logits -> background
        array[array > 0] = 1  # positive logits -> mask
        return Image.fromarray((array * 255).astype(np.uint8))
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character
    occurs an odd number of times (spaces and case are ignored)."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Same check as above, using an explicit frequency dictionary."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations above."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
import itertools
import math


def is_prime(number: int) -> bool:
    """Check whether a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
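# For reference, solution() evaluates to 104743, the 10_001st prime
# (Project Euler problem 7).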
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
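# Lazy-import sketch (assuming this file is the xlm_roberta package __init__):
# importing the package is cheap, and a heavy submodule such as
# modeling_xlm_roberta is only loaded when one of its attributes is first
# accessed, e.g.:
#
#     from transformers.models.xlm_roberta import XLMRobertaConfig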
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel PyTorch model instance to be converted
        ckpt_dir: TensorFlow model directory
        model_name: model name
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
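# Example invocation (script and path names are illustrative):
#
#     python convert_bert_pytorch_checkpoint_to_original_tf.py \
#         --model_name bert-base-uncased \
#         --pytorch_model_path ./pytorch_model.bin \
#         --tf_cache_dir ./tf_ckpt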
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)  # black image

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)  # black image

        return images, nsfw_detected, watermark_detected
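# Hypothetical usage sketch (the checkpoint name and tensors are illustrative,
# not taken from this file):
#
#     checker = IFSafetyChecker.from_pretrained("<safety-checker-checkpoint>")
#     images, nsfw_detected, watermark_detected = checker(clip_input, images)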
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
# Sequence: a(1) = 1, a(i + 1) = a(i) + digitsum(a(i)); find a very late term
# by caching "jumps" keyed on the digit sum of the high part of the number.

ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """Jump as far as possible from term i (digits stored little-endian in a_i)
    towards term n, writing each term as a(i) = b * 10**k + c and reusing
    cached jumps keyed on digitsum(b) and c.
    Returns (difference added, number of terms jumped)."""
    # ds_b: digitsum(b); c: the low k digits as an integer
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    """Compute terms sequentially, updating only the low k digits, until term n
    is reached or a carry escapes past position k."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add `addend` to the little-endian digit array `digits`, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return the n-th term of the sequence a(1) = 1, a(i + 1) = a(i) + digitsum(a(i))."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
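# A naive reference for small n (hypothetical helper, handy as a sanity check
# against the memoised implementation above):
#
#     def naive_solution(n: int) -> int:
#         a = 1
#         for _ in range(n - 1):
#             a += sum(int(d) for d in str(a))
#         return a
#
# The sequence starts 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, so
# naive_solution(10) == 62.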
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ["pixel_values", "input_ids", "attention_mask"]
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
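# Minimal usage sketch through the public `datasets` API (the file pattern is
# illustrative):
#
#     from datasets import load_dataset
#     ds = load_dataset("parquet", data_files={"train": "data/*.parquet"}, split="train")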
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            # Fill a character buffer large enough to yield several sequences
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # Chunk the concatenated token stream into fixed-length sequences
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
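# Note: the reported perplexity is exp(mean token-level cross-entropy loss);
# e.g. a mean eval loss of 1.0 corresponds to a perplexity of e ≈ 2.72.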
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Take a product name as input and return product information in a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": (
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"
            " (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
        ),
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    data_frame.loc[
        data_frame["Current Price of the product"] == "", "Current Price of the product"
    ] = " "
    data_frame.loc[data_frame["Product Rating"] == "", "Product Rating"] = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids."""
        pass
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers that equal the sum of the fifth powers of
    their own digits."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
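# Spot check: 4150 = 4**5 + 1**5 + 5**5 + 0**5, so it is counted by solution();
# the full sum over all such numbers is 443839 (Project Euler problem 30).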
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def UpperCamelCase_ ( self : Optional[int] ):
__A = 0
def UpperCamelCase_ ( self : Tuple ):
__A = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(A ,A )
def UpperCamelCase_ ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
# save in new folder
model_config.save_pretrained(A )
processor.save_pretrained(A )
__A = AutoProcessor.from_pretrained(A )
self.assertIsInstance(A ,A )
def UpperCamelCase_ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(A ,os.path.join(A ,A ) )
copyfile(A ,os.path.join(A ,"vocab.json" ) )
__A = AutoProcessor.from_pretrained(A )
self.assertIsInstance(A ,A )
def UpperCamelCase_ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            processor = Wav2Vec2Processor(feature_extractor, tokenizer)
# save in new folder
processor.save_pretrained(A )
# drop `processor_class` in tokenizer
with open(os.path.join(A ,A ) ,"r" ) as f:
__A = json.load(A )
config_dict.pop("processor_class" )
with open(os.path.join(A ,A ) ,"w" ) as f:
f.write(json.dumps(A ) )
__A = AutoProcessor.from_pretrained(A )
self.assertIsInstance(A ,A )
def UpperCamelCase_ ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            processor = Wav2Vec2Processor(feature_extractor, tokenizer)
# save in new folder
processor.save_pretrained(A )
# drop `processor_class` in feature extractor
with open(os.path.join(A ,A ) ,"r" ) as f:
__A = json.load(A )
config_dict.pop("processor_class" )
with open(os.path.join(A ,A ) ,"w" ) as f:
f.write(json.dumps(A ) )
__A = AutoProcessor.from_pretrained(A )
self.assertIsInstance(A ,A )
def UpperCamelCase_ ( self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
model_config.save_pretrained(A )
# copy relevant files
copyfile(A ,os.path.join(A ,"vocab.json" ) )
# create emtpy sample processor
with open(os.path.join(A ,A ) ,"w" ) as f:
f.write("{}" )
__A = AutoProcessor.from_pretrained(A )
self.assertIsInstance(A ,A )
def UpperCamelCase_ ( self : Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(A ):
__A = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A ):
__A = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" ,trust_remote_code=A )
__A = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" ,trust_remote_code=A )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ ,"NewProcessor" )
__A = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ ,"NewFeatureExtractor" )
__A = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizerFast" )
# Test we can also load the slow version
__A = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" ,trust_remote_code=A ,use_fast=A )
__A = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ ,"NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ ,"NewTokenizer" )
def UpperCamelCase_ ( self : Optional[int] ):
try:
AutoConfig.register("custom" ,A )
AutoFeatureExtractor.register(A ,A )
AutoTokenizer.register(A ,slow_tokenizer_class=A )
AutoProcessor.register(A ,A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A ):
AutoProcessor.register(A ,A )
# Now that the config is registered, it can be used as any other config with the auto-API
__A = CustomFeatureExtractor.from_pretrained(A )
with tempfile.TemporaryDirectory() as tmp_dir:
__A = os.path.join(A ,"vocab.txt" )
with open(A ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__A = CustomTokenizer(A )
__A = CustomProcessor(A ,A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(A )
__A = AutoProcessor.from_pretrained(A )
self.assertIsInstance(A ,A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase_ ( self : Optional[Any] ):
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = False
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = False
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" ,A )
AutoFeatureExtractor.register(A ,A )
AutoTokenizer.register(A ,slow_tokenizer_class=A )
AutoProcessor.register(A ,A )
# If remote code is not set, the default is to use local classes.
__A = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ ,"NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
__A = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" ,trust_remote_code=A )
self.assertEqual(processor.__class__.__name__ ,"NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
__A = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" ,trust_remote_code=A )
self.assertEqual(processor.__class__.__name__ ,"NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase_ ( self : str ):
__A = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ ,"BertTokenizerFast" )
def UpperCamelCase_ ( self : int ):
__A = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ ,"ConvNextImageProcessor" )
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def UpperCamelCase_ ( cls : Any ):
__A = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] ):
try:
delete_repo(token=cls._token ,repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-processor" )
except HTTPError:
pass
def UpperCamelCase_ ( self : Optional[int] ):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(A ,"test-processor" ) ,push_to_hub=A ,use_auth_token=self._token )
        new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(A ,getattr(new_processor.feature_extractor ,A ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def UpperCamelCase_ ( self : List[str] ):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(A ,"test-processor-org" ) ,push_to_hub=A ,use_auth_token=self._token ,organization="valid_org" ,)
__A = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(A ,getattr(new_processor.feature_extractor ,A ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def UpperCamelCase_ ( self : str ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__A = CustomFeatureExtractor.from_pretrained(A )
with tempfile.TemporaryDirectory() as tmp_dir:
__A = os.path.join(A ,"vocab.txt" )
with open(A ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__A = CustomTokenizer(A )
__A = CustomProcessor(A ,A )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' ,token=self._token )
__A = Repository(A ,clone_from=f'''{USER}/test-dynamic-processor''' ,token=self._token )
processor.save_pretrained(A )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map ,{
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} ,)
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(A ,"tokenizer_config.json" ) ) as f:
__A = json.load(A )
self.assertDictEqual(
tokenizer_config["auto_map"] ,{
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} ,)
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(A ,"custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(A ,"custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(A ,"custom_processing.py" ) ) )
repo.push_to_hub()
__A = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' ,trust_remote_code=A )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ ,"CustomProcessor" )
| 55 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( ConfigTester ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
__A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A ,"tf_padding" ) )
self.parent.assertTrue(hasattr(A ,"depth_multiplier" ) )
class MobileNetVaModelTester :
'''simple docstring'''
def __init__( self : Optional[Any] ,A : int ,A : List[Any]=13 ,A : int=3 ,A : Optional[Any]=32 ,A : Union[str, Any]=0.25 ,A : Tuple=8 ,A : Optional[int]=True ,A : Union[str, Any]=10_24 ,A : Any=32 ,A : Optional[int]="relu6" ,A : int=0.1 ,A : Optional[Any]=0.02 ,A : Optional[Any]=True ,A : List[str]=True ,A : str=10 ,A : str=None ,):
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = depth_multiplier
__A = min_depth
__A = tf_padding
__A = int(last_hidden_size * depth_multiplier )
__A = output_stride
__A = hidden_act
__A = classifier_dropout_prob
__A = use_labels
__A = is_training
__A = num_labels
__A = initializer_range
__A = scope
def UpperCamelCase_ ( self : Optional[int] ):
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.num_labels )
__A = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
__A = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,min_depth=self.min_depth ,tf_padding=self.tf_padding ,hidden_act=self.hidden_act ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self : Optional[int] ,A : str ,A : Tuple ,A : Optional[int] ,A : List[str] ):
__A = MobileNetVaModel(config=A )
model.to(A )
model.eval()
__A = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCamelCase_ ( self : List[Any] ,A : Union[str, Any] ,A : List[Any] ,A : int ,A : Union[str, Any] ):
__A = self.num_labels
__A = MobileNetVaForImageClassification(A )
model.to(A )
model.eval()
__A = model(A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Tuple ):
__A = self.prepare_config_and_inputs()
__A , __A , __A , __A = config_and_inputs
__A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def UpperCamelCase_ ( self : Any ):
__A = MobileNetVaModelTester(self )
__A = MobileNetVaConfigTester(self ,config_class=A ,has_text_modality=A )
def UpperCamelCase_ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def UpperCamelCase_ ( self : Any ):
pass
def UpperCamelCase_ ( self : Optional[int] ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(A )
__A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,A )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Optional[int] ):
def check_hidden_states_output(A : List[Any] ,A : List[Any] ,A : Optional[int] ):
__A = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(A ,A ) )
__A = outputs.hidden_states
__A = 26
self.assertEqual(len(A ) ,A )
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = True
check_hidden_states_output(A ,A ,A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
check_hidden_states_output(A ,A ,A )
def UpperCamelCase_ ( self : Tuple ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def UpperCamelCase_ ( self : Union[str, Any] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = MobileNetVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img ( ):
"""simple docstring"""
__A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : List[str] ):
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
__A = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(A )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
__A = model(**A )
# verify the logits
__A = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape ,A )
__A = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
| 55 | 1 |
def solution ( a_ = 1_0_0_0 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , a_ ) if e % 3 == 0 or e % 5 == 0 )
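# Editor's sketch (not part of the original entry): the same sum has a
# constant-time closed form via inclusion-exclusion over arithmetic series.
def solution_closed_form ( limit = 1_0_0_0 ) -> int:
    def series_sum(k ):
        # sum of the multiples of k strictly below `limit`
        n = (limit - 1) // k
        return k * n * (n + 1) // 2
    return series_sum(3 ) + series_sum(5 ) - series_sum(15 )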
if __name__ == "__main__":
print(f'''{solution() = }''')
| 55 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester :
'''simple docstring'''
def __init__( self : str ,A : int ,A : int=2 ,A : Optional[Any]=3 ,A : Dict=4 ,A : Optional[int]=2 ,A : Union[str, Any]=7 ,A : List[str]=True ,A : Union[str, Any]=True ,A : Optional[int]=True ,A : Optional[int]=True ,A : Tuple=99 ,A : Optional[int]=36 ,A : Dict=3 ,A : str=4 ,A : Optional[Any]=37 ,A : Dict="gelu" ,A : Dict=0.1 ,A : Union[str, Any]=0.1 ,A : Union[str, Any]=5_12 ,A : Any=16 ,A : Union[str, Any]=2 ,A : List[Any]=0.02 ,A : List[Any]=6 ,A : Optional[int]=6 ,A : List[Any]=3 ,A : Union[str, Any]=4 ,A : Tuple=None ,A : List[str]=10_00 ,):
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = patch_size
__A = text_seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = coordinate_size
__A = shape_size
__A = num_labels
__A = num_choices
__A = scope
__A = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__A = text_seq_length
__A = (image_size // patch_size) ** 2 + 1
__A = self.text_seq_length + self.image_seq_length
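        # e.g. with the defaults above (image_size=4, patch_size=2, text_seq_length=7):
        # image_seq_length = (4 // 2) ** 2 + 1 = 5 (patches + CLS), so seq_length = 7 + 5 = 12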
def UpperCamelCase_ ( self : int ):
__A = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
__A = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__A = bbox[i, j, 3]
__A = bbox[i, j, 1]
__A = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__A = bbox[i, j, 2]
__A = bbox[i, j, 0]
__A = t
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.text_seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
__A = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase_ ( self : Optional[int] ,A : List[str] ,A : Any ,A : Dict ,A : List[Any] ,A : Optional[int] ,A : Any ,A : Dict ,A : List[Any] ):
__A = LayoutLMvaModel(config=A )
model.to(A )
model.eval()
# text + image
__A = model(A ,pixel_values=A )
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A )
__A = model(A ,bbox=A ,pixel_values=A ,token_type_ids=A )
__A = model(A ,bbox=A ,pixel_values=A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
__A = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__A = model(pixel_values=A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[int] ,A : Dict ,A : List[str] ,A : Any ,A : List[Any] ,A : Any ,A : Any ,A : Dict ,A : Optional[Any] ):
__A = self.num_labels
__A = LayoutLMvaForSequenceClassification(A )
model.to(A )
model.eval()
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : str ,A : Optional[Any] ,A : Dict ,A : str ,A : Tuple ,A : Union[str, Any] ,A : List[Any] ,A : Any ,A : Union[str, Any] ):
__A = self.num_labels
__A = LayoutLMvaForTokenClassification(config=A )
model.to(A )
model.eval()
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Optional[int] ,A : Optional[Any] ,A : int ,A : str ,A : List[str] ,A : int ,A : List[str] ,A : List[str] ,A : Dict ):
__A = LayoutLMvaForQuestionAnswering(config=A )
model.to(A )
model.eval()
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : str ):
__A = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) = config_and_inputs
__A = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
def UpperCamelCase_ ( self : str ,A : Any ,A : Any ,A : Tuple ,A : List[Any] ,A : Optional[Any] ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = LayoutLMvaModelTester(self )
__A = ConfigTester(self ,config_class=A ,hidden_size=37 )
def UpperCamelCase_ ( self : List[Any] ,A : int ,A : List[str] ,A : Dict=False ):
__A = copy.deepcopy(A )
if model_class in get_values(A ):
__A = {
k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
if isinstance(A ,torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A ):
__A = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=A )
elif model_class in get_values(A ):
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
elif model_class in [
*get_values(A ),
]:
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
elif model_class in [
*get_values(A ),
]:
__A = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=A ,)
return inputs_dict
def UpperCamelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : str ):
__A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A = type
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
def UpperCamelCase_ ( self : str ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = LayoutLMvaModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img ( ):
"""simple docstring"""
__A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class LayoutLMvaModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : Any ):
return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : Dict ):
__A = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(A )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="pt" ).pixel_values.to(A )
__A = torch.tensor([[1, 2]] )
__A = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__A = model(
input_ids=input_ids.to(A ) ,bbox=bbox.to(A ) ,pixel_values=pixel_values.to(A ) ,)
# verify the logits
__A = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape ,A )
__A = torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,A ,atol=1E-4 ) )
| 55 | 1 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations ( do_eager_mode , use_xla ):
    """simple docstring"""
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
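# Usage sketch (editor's annotation, mirroring how the decorator is applied in
# the methods further below): the factory returns a decorator that either leaves
# `func` in eager mode or wraps it in an XLA-compiled tf.function, e.g.
#
#   @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#   def forward():
#       return model(input_ids, training=False)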
def random_input_ids ( batch_size , sequence_length , vocab_size ) -> ["tf.Tensor"]:
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
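# Editor's note: the helper above returns a (batch_size, sequence_length) tensor
# of token ids drawn uniformly from [0, vocab_size) to drive the benchmarks below.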
class TensorFlowBenchmark ( Benchmark ):
'''simple docstring'''
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
def UpperCamelCase_ ( self : Tuple ):
return tf.__version__
def UpperCamelCase_ ( self : Union[str, Any] ,A : str ,A : int ,A : int ):
# initialize GPU on separate process
__A = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__A = self._prepare_inference_func(A ,A ,A )
return self._measure_speed(_inference )
def UpperCamelCase_ ( self : str ,A : str ,A : int ,A : int ):
__A = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__A = self._prepare_train_func(A ,A ,A )
return self._measure_speed(_train )
def UpperCamelCase_ ( self : Dict ,A : str ,A : int ,A : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] ,A )
__A = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__A = self._prepare_inference_func(A ,A ,A )
return self._measure_memory(_inference )
def UpperCamelCase_ ( self : int ,A : str ,A : int ,A : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] ,A )
__A = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
__A = self._prepare_train_func(A ,A ,A )
return self._measure_memory(_train )
def UpperCamelCase_ ( self : Tuple ,A : str ,A : int ,A : int ):
__A = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
__A = (
hasattr(A ,"architectures" )
and isinstance(config.architectures ,A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__A = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
__A = __import__("transformers" ,fromlist=[model_class] )
__A = getattr(A ,A )
__A = model_cls(A )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
__A = TF_MODEL_MAPPING[config.__class__](A )
# encoder-decoder has vocab size saved differently
__A = config.vocab_size if hasattr(A ,"vocab_size" ) else config.encoder.vocab_size
__A = random_input_ids(A ,A ,A )
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
def encoder_decoder_forward():
return model(A ,decoder_input_ids=A ,training=A )
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
def encoder_forward():
return model(A ,training=A )
__A = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ ( self : Optional[int] ,A : str ,A : int ,A : int ):
__A = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
__A = (
hasattr(A ,"architectures" )
and isinstance(config.architectures ,A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__A = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
__A = __import__("transformers" ,fromlist=[model_class] )
__A = getattr(A ,A )
__A = model_cls(A )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
__A = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](A )
# encoder-decoder has vocab size saved differently
__A = config.vocab_size if hasattr(A ,"vocab_size" ) else config.encoder.vocab_size
__A = random_input_ids(A ,A ,A )
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
def encoder_decoder_train():
__A = model(A ,decoder_input_ids=A ,labels=A ,training=A )[0]
__A = tf.gradients(A ,model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
def encoder_train():
__A = model(A ,labels=A ,training=A )[0]
__A = tf.gradients(A ,model.trainable_variables )
return gradients
__A = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ ( self : str ,A : List[Any] ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model an extra 5 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(A ,repeat=1 ,number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__A = timeit.repeat(
A ,repeat=self.args.repeat ,number=10 ,)
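                # `number=10` executions per repeat, so dividing the fastest
                # repeat by 10 gives the best-case time for a single run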
return min(A ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def UpperCamelCase_ ( self : Optional[Any] ,A : Callable[[], None] ):
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
__A = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
__A = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
__A = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__A = nvml.nvmlDeviceGetMemoryInfo(A )
__A = meminfo.used
__A = Memory(A )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
__A = None
else:
__A = measure_peak_memory_cpu(A )
__A = Memory(A ) if isinstance(A ,A ) else memory_bytes
if self.args.trace_memory_line_by_line:
__A = stop_memory_tracing(A )
if memory is None:
__A = summary.total
else:
__A = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 55 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any ,A : List[str] ,A : str=7 ,A : Optional[Any]=3 ,A : Any=18 ,A : int=30 ,A : int=4_00 ,A : List[str]=True ,A : Union[str, Any]=None ,A : Union[str, Any]=True ,A : Tuple=None ,A : Tuple=True ,A : Union[str, Any]=[0.5, 0.5, 0.5] ,A : str=[0.5, 0.5, 0.5] ,A : List[Any]=False ,):
__A = size if size is not None else {"height": 20, "width": 20}
__A = crop_size if crop_size is not None else {"height": 18, "width": 18}
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = min_resolution
__A = max_resolution
__A = do_resize
__A = size
__A = do_center_crop
__A = crop_size
__A = do_normalize
__A = image_mean
__A = image_std
__A = do_reduce_labels
def UpperCamelCase_ ( self : List[str] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs ( ):
    """simple docstring"""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image = Image.open(dataset[0]["file"] )
    map = Image.open(dataset[1]["file"] )
    return image, map
def prepare_semantic_batch_inputs ( ):
    """simple docstring"""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image_a = Image.open(ds[0]["file"] )
    map_a = Image.open(ds[1]["file"] )
    image_b = Image.open(ds[2]["file"] )
    map_b = Image.open(ds[3]["file"] )
    return [image_a, image_b], [map_a, map_b]
@require_torch
@require_vision
class BeitImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = BeitImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : List[Any] ):
__A = BeitImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : int ):
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A ,"do_resize" ) )
self.assertTrue(hasattr(A ,"size" ) )
self.assertTrue(hasattr(A ,"do_center_crop" ) )
self.assertTrue(hasattr(A ,"center_crop" ) )
self.assertTrue(hasattr(A ,"do_normalize" ) )
self.assertTrue(hasattr(A ,"image_mean" ) )
self.assertTrue(hasattr(A ,"image_std" ) )
def UpperCamelCase_ ( self : List[str] ):
__A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size ,{"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels ,A )
__A = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,crop_size=84 ,reduce_labels=A )
self.assertEqual(image_processor.size ,{"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels ,A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : List[str] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
for image in image_inputs:
self.assertIsInstance(A ,np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : int ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : str ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
__A = []
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__A = image_processing(image_inputs[0] ,maps[0] ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test not batched input (PIL images)
__A , __A = prepare_semantic_single_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched input (PIL images)
__A , __A = prepare_semantic_batch_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
def UpperCamelCase_ ( self : Dict ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__A , __A = prepare_semantic_single_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 1_50 )
__A = True
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
| 55 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
__A = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
__A = dict(zip(A ,range(len(A ) ) ) )
        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
__A = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 1_60_00,
"return_attention_mask": False,
"do_normalize": True,
}
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        self.feature_extraction_file = os.path.join(self.tmpdirname ,FEATURE_EXTRACTOR_NAME )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(A ) + "\n" )
with open(self.feature_extraction_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(A ) + "\n" )
# load decoder from hub
__A = "hf-internal-testing/ngram-beam-search-decoder"
def UpperCamelCase_ ( self : Optional[Any] ,**A : Optional[int] ):
__A = self.add_kwargs_tokens_map.copy()
kwargs.update(A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**A )
def UpperCamelCase_ ( self : str ,**A : Dict ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**A )
def UpperCamelCase_ ( self : Optional[int] ,**A : Optional[Any] ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**A )
def UpperCamelCase_ ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.get_tokenizer()
__A = self.get_feature_extractor()
__A = self.get_decoder()
__A = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
processor.save_pretrained(self.tmpdirname )
__A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,A )
def UpperCamelCase_ ( self : List[Any] ):
__A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # reload with additional decoder kwargs and check that they are set on the language model
__A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(A ,"include" ):
WavaVecaProcessorWithLM(
tokenizer=A ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def UpperCamelCase_ ( self : int ):
__A = self.get_feature_extractor()
__A = self.get_tokenizer()
__A = self.get_decoder()
__A = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
__A = floats_list((3, 10_00) )
__A = feature_extractor(A ,return_tensors="np" )
__A = processor(A ,return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.get_feature_extractor()
__A = self.get_tokenizer()
__A = self.get_decoder()
__A = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
__A = "This is a test string"
__A = processor(text=A )
__A = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase_ ( self : List[str] ,A : str=(2, 10, 16) ,A : Tuple=77 ):
np.random.seed(A )
return np.random.rand(*A )
def UpperCamelCase_ ( self : List[str] ):
__A = self.get_feature_extractor()
__A = self.get_tokenizer()
__A = self.get_decoder()
__A = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
__A = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__A = processor.decode(A )
__A = decoder.decode_beams(A )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual("</s> <s> </s>" ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def UpperCamelCase_ ( self : Any ,A : Optional[Any] ):
__A = self.get_feature_extractor()
__A = self.get_tokenizer()
__A = self.get_decoder()
__A = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
__A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__A = processor.batch_decode(A )
else:
with get_context(A ).Pool() as pool:
__A = processor.batch_decode(A ,A )
__A = list(A )
with get_context("fork" ).Pool() as p:
__A = decoder.decode_beams_batch(A ,A )
__A , __A , __A = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A ,decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] ,decoded_processor.text )
self.assertListEqual(A ,decoded_processor.logit_score )
self.assertListEqual(A ,decoded_processor.lm_score )
def UpperCamelCase_ ( self : str ):
__A = self.get_feature_extractor()
__A = self.get_tokenizer()
__A = self.get_decoder()
__A = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
__A = self._get_dummy_logits()
__A = 15
__A = -20.0
__A = -4.0
__A = processor.batch_decode(
A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,)
__A = decoded_processor_out.text
__A = list(A )
with get_context("fork" ).Pool() as pool:
__A = decoder.decode_beams_batch(
A ,A ,beam_width=A ,beam_prune_logp=A ,token_min_logp=A ,)
__A = [d[0][0] for d in decoded_decoder_out]
__A = [d[0][2] for d in decoded_decoder_out]
__A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A ,A )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] ,A )
self.assertTrue(np.array_equal(A ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] ,A ,atol=1E-3 ) )
self.assertTrue(np.array_equal(A ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] ,A ,atol=1E-3 ) )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.get_feature_extractor()
__A = self.get_tokenizer()
__A = self.get_decoder()
__A = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
__A = self._get_dummy_logits()
__A = 2.0
__A = 5.0
__A = -20.0
__A = True
__A = processor.batch_decode(
A ,alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,)
__A = decoded_processor_out.text
__A = list(A )
decoder.reset_params(
alpha=A ,beta=A ,unk_score_offset=A ,lm_score_boundary=A ,)
with get_context("fork" ).Pool() as pool:
__A = decoder.decode_beams_batch(
A ,A ,)
__A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A ,A )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] ,A )
__A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-20.0 )
self.assertEqual(lm_model.score_boundary ,A )
def UpperCamelCase_ ( self : str ):
__A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__A = processor.decoder.model_container[processor.decoder._model_key]
__A = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
__A = os.listdir(A )
__A = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A ,A )
def UpperCamelCase_ ( self : Any ):
__A = snapshot_download("hf-internal-testing/processor_with_lm" )
__A = WavaVecaProcessorWithLM.from_pretrained(A )
__A = processor.decoder.model_container[processor.decoder._model_key]
__A = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
__A = os.listdir(A )
__A = os.listdir(A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(A ,A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__A = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
__A = floats_list((3, 10_00) )
__A = processor_wavaveca(A ,return_tensors="np" )
__A = processor_auto(A ,return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__A = self._get_dummy_logits()
__A = processor_wavaveca.batch_decode(A )
__A = processor_auto.batch_decode(A )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.get_feature_extractor()
__A = self.get_tokenizer()
__A = self.get_decoder()
__A = WavaVecaProcessorWithLM(tokenizer=A ,feature_extractor=A ,decoder=A )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg="`processor` and `feature_extractor` model input names do not match" ,)
@staticmethod
def UpperCamelCase_ ( A : Optional[Any] ,A : Tuple ):
__A = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self : str ):
__A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__A = self._get_dummy_logits()[0]
__A = processor.decode(A ,output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(A ,A ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] ,"word" ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] ,"word" ) ,["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] ,"start_offset" ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] ,"end_offset" ) ,[1, 3, 5] )
def UpperCamelCase_ ( self : Dict ):
__A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__A = self._get_dummy_logits()
__A = processor.batch_decode(A ,output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(A ,A ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(A ,"word" ) ) for o in outputs["word_offsets"]] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] ,"word" ) ,["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] ,"start_offset" ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] ,"end_offset" ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCamelCase_ ( self : Tuple ):
import torch
__A = load_dataset("common_voice" ,"en" ,split="train" ,streaming=A )
__A = ds.cast_column("audio" ,datasets.Audio(sampling_rate=1_60_00 ) )
__A = iter(A )
__A = next(A )
__A = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
__A = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__A = processor(sample["audio"]["array"] ,return_tensors="pt" ).input_values
with torch.no_grad():
__A = model(A ).logits.cpu().numpy()
__A = processor.decode(logits[0] ,output_word_offsets=A )
__A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
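        # Editor's note: `inputs_to_logits_ratio` is the encoder's total stride
        # (320 for base Wav2Vec2), so at a 16 kHz sampling rate each logit frame
        # spans 320 / 16000 = 0.02 s; the offsets below are therefore in seconds.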
__A = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
__A = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(A ,"word" ) ) ,A )
self.assertEqual(" ".join(self.get_from_offsets(A ,"word" ) ) ,output.text )
# output times
__A = torch.tensor(self.get_from_offsets(A ,"start_time" ) )
__A = torch.tensor(self.get_from_offsets(A ,"end_time" ) )
# fmt: off
__A = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
__A = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A ,A ,atol=0.01 ) )
self.assertTrue(torch.allclose(A ,A ,atol=0.01 ) )
| 55 |
from numpy import exp, pi, sqrt
def gaussian ( x , mu = 0.0 , sigma = 1.0 ) -> float:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
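# Quick check (editor's addition): the density peaks at x == mu with value
# 1 / sqrt(2 * pi * sigma**2); for the standard normal, gaussian(0.0) equals
# 1 / sqrt(2 * pi), roughly 0.3989.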
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55 | 1 |
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
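# Precomputing the ten digit factorials once turns every digit_factorial_sum
# call below into a dictionary lookup per digit.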
def digit_factorial_sum ( number ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        raise TypeError("Parameter number must be int" )
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0" )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
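# Worked example (editor's addition): digit_factorial_sum(145) = 1! + 4! + 5!
# = 1 + 24 + 120 = 145, and 169 -> 363601 -> 1454 -> 169 is the 3-cycle that
# many long chains eventually enter (Project Euler 74).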
def solution ( chain_length = 6_0 , number_limit = 1_0_0_0_0_0_0 ) -> int:
    """simple docstring"""
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError("Parameters chain_length and number_limit must be int" )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0" )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
| 55 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase_ ( self : Optional[int] ):
__A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
__A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
__A = "xvjiarui/stable-diffusion-2-inpainting"
__A , __A = FlaxStableDiffusionInpaintPipeline.from_pretrained(A ,safety_checker=A )
__A = "Face of a yellow cat, high resolution, sitting on a park bench"
__A = jax.random.PRNGKey(0 )
__A = 50
__A = jax.device_count()
__A = num_samples * [prompt]
__A = num_samples * [init_image]
__A = num_samples * [mask_image]
__A , __A , __A = pipeline.prepare_inputs(A ,A ,A )
# shard inputs and rng
__A = replicate(A )
__A = jax.random.split(A ,jax.device_count() )
__A = shard(A )
__A = shard(A )
__A = shard(A )
__A = pipeline(
A ,A ,A ,A ,A ,A ,jit=A )
__A = output.images.reshape(A ,5_12 ,5_12 ,3 )
__A = images[0, 2_53:2_56, 2_53:2_56, -1]
__A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__A = jnp.array(
[0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 55 | 1 |
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
SCREAMING_SNAKE_CASE :List[Any] = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    '''simple docstring'''

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
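    # Minimal round-trip sketch (illustrative): both parties derive the same secret.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    assert shared_a == shared_b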
| 55 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any ,A : Optional[int] ,A : Optional[int]=7 ,A : Optional[Any]=3 ,A : List[str]=18 ,A : Any=30 ,A : Tuple=4_00 ,A : Union[str, Any]=True ,A : Optional[Any]=32 ,A : Union[str, Any]=True ,):
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = min_resolution
__A = max_resolution
__A = do_resize
__A = size_divisor
__A = do_rescale
def UpperCamelCase_ ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = GLPNImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : int ):
__A = GLPNImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Any ):
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A ,"do_resize" ) )
self.assertTrue(hasattr(A ,"size_divisor" ) )
self.assertTrue(hasattr(A ,"resample" ) )
self.assertTrue(hasattr(A ,"do_rescale" ) )
def UpperCamelCase_ ( self : str ):
pass
def UpperCamelCase_ ( self : Dict ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def UpperCamelCase_ ( self : Optional[Any] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
for image in image_inputs:
self.assertIsInstance(A ,np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def UpperCamelCase_ ( self : int ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
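# Rough usage sketch (illustrative; mirrors the processor exercised above):
# processor = GLPNImageProcessor(size_divisor=32)
# pixel_values = processor(images=pil_image, return_tensors="pt").pixel_values
# Height and width of `pixel_values` are then multiples of `size_divisor`.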
| 55 | 1 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = ["speech"]
def __init__( self : List[Any] ,*A : Optional[int] ,**A : List[str] ):
requires_backends(self ,["speech"] )
class UpperCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = ["speech"]
def __init__( self : int ,*A : Optional[int] ,**A : Optional[int] ):
requires_backends(self ,["speech"] )
| 55 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case_ = Features({"image": Image()} )
snake_case_ = Features({"labels": ClassLabel} )
snake_case_ = "image"
snake_case_ = "labels"
def UpperCamelCase_ ( self : Optional[Any] ,A : Tuple ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] ,A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
__A = copy.deepcopy(self )
__A = self.label_schema.copy()
__A = features[self.label_column]
__A = label_schema
return task_template
@property
def UpperCamelCase_ ( self : Any ):
return {
self.image_column: "image",
self.label_column: "labels",
}
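# Rough usage sketch (illustrative; `align_with_features` is the public name of
# the alignment method on `datasets` task templates):
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# aligned_template = task_template.align_with_features(features)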
| 55 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 55 |
from math import sqrt
def is_prime(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"
    return status
def sieve_er(n: int) -> list:
    """simple docstring"""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """simple docstring"""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def prime_factorization(number: int) -> list:
    """simple docstring"""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans


def is_even(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"
    return number % 2 != 0


def goldbach(number: int) -> list:
    """simple docstring"""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variables for the while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans
def gcd(number1: int, number2: int) -> int:
    """simple docstring"""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """simple docstring"""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captures numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"
    return ans
def get_prime(n: int) -> int:
    """simple docstring"""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """simple docstring"""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'p_number_1' nor 'p_number_2'!
    return ans
def get_divisors(n: int) -> list:
    """simple docstring"""
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """simple docstring"""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """simple docstring"""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    """simple docstring"""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
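if __name__ == "__main__":
    # A few spot checks (illustrative) for the helpers above.
    assert is_prime(97) and not is_prime(100)
    assert gcd(24, 36) == 12 and kg_v(4, 6) == 12
    assert goldbach(28) == [5, 23]
    assert simplify_fraction(10, 20) == (1, 2)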
| 55 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE :Optional[Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :str = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55 |
import os
def solution() -> int:
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
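# Worked micro-example (illustrative): for the triangle [[3], [7, 4], [2, 4, 6]],
# the in-place pass yields [[3], [10, 7], [12, 14, 13]] and max(a[-1]) == 14,
# i.e. the best path 3 -> 7 -> 4.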
| 55 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : str = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """simple docstring"""
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """simple docstring"""

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """simple docstring"""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """simple docstring"""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
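# Rough usage sketch (illustrative; `params` is a hypothetical flax parameter tree):
# from flax.core.frozen_dict import unfreeze
# spec = set_partitions(unfreeze(params))
# Every leaf of `spec` is then either a PartitionSpec or None, ready to feed to
# a partitioned-execution API such as pjit.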
| 55 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    """simple docstring"""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r'^(.*)_\d+\.jpg$', stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        '''simple docstring'''
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
def __len__( self: Any ):
'''simple docstring'''
return len(self.file_names )
    def __getitem__(self, idx):
        '''simple docstring'''
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert('RGB')
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
"""simple docstring"""
if args.with_tracking:
__UpperCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
__UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase = config['lr']
__UpperCamelCase = int(config['num_epochs'] )
__UpperCamelCase = int(config['seed'] )
__UpperCamelCase = int(config['batch_size'] )
__UpperCamelCase = config['image_size']
if not isinstance(_lowercase , (list, tuple) ):
__UpperCamelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
__UpperCamelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
__UpperCamelCase = int(args.checkpointing_steps )
else:
raise ValueError(
f'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
__UpperCamelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
__UpperCamelCase = os.path.split(_lowercase )[-1].split('.' )[0]
accelerator.init_trackers(_lowercase , _lowercase )
# Grab all the image filenames
__UpperCamelCase = [os.path.join(args.data_dir , _lowercase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
__UpperCamelCase = [extract_label(_lowercase ) for fname in file_names]
__UpperCamelCase = list(set(_lowercase ) )
id_to_label.sort()
__UpperCamelCase = {lbl: i for i, lbl in enumerate(_lowercase )}
# Set the seed before splitting the data.
np.random.seed(_lowercase )
torch.manual_seed(_lowercase )
torch.cuda.manual_seed_all(_lowercase )
# Split our filenames between train and validation
__UpperCamelCase = np.random.permutation(len(_lowercase ) )
__UpperCamelCase = int(0.8 * len(_lowercase ) )
__UpperCamelCase = random_perm[:cut]
__UpperCamelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
__UpperCamelCase = Compose([RandomResizedCrop(_lowercase , scale=(0.5, 1.0) ), ToTensor()] )
__UpperCamelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_lowercase , label_to_id=_lowercase )
# For evaluation, we use a deterministic Resize
__UpperCamelCase = Compose([Resize(_lowercase ), ToTensor()] )
__UpperCamelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=_lowercase , label_to_id=_lowercase )
# Instantiate dataloaders.
__UpperCamelCase = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
__UpperCamelCase = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase = create_model('resnet50d' , pretrained=_lowercase , num_classes=len(_lowercase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCamelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
__UpperCamelCase = False
for param in model.get_classifier().parameters():
__UpperCamelCase = True
# We normalize the batches of images to be a bit faster.
__UpperCamelCase = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
__UpperCamelCase = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
__UpperCamelCase = OneCycleLR(optimizer=_lowercase , max_lr=_lowercase , epochs=_lowercase , steps_per_epoch=len(_lowercase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
__UpperCamelCase = 0
# We also need to keep track of the starting epoch so files are named properly
__UpperCamelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
__UpperCamelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
__UpperCamelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
__UpperCamelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
__UpperCamelCase = os.path.splitext(_lowercase )[0]
if "epoch" in training_difference:
__UpperCamelCase = int(training_difference.replace('epoch_' , '' ) ) + 1
__UpperCamelCase = None
else:
__UpperCamelCase = int(training_difference.replace('step_' , '' ) )
__UpperCamelCase = resume_step // len(_lowercase )
resume_step -= starting_epoch * len(_lowercase )
# Now we train the model
for epoch in range(_lowercase , _lowercase ):
model.train()
if args.with_tracking:
__UpperCamelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
__UpperCamelCase = accelerator.skip_first_batches(_lowercase , _lowercase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
__UpperCamelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
__UpperCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
__UpperCamelCase = (batch['image'] - mean) / std
__UpperCamelCase = model(_lowercase )
__UpperCamelCase = torch.nn.functional.cross_entropy(_lowercase , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_lowercase , _lowercase ):
__UpperCamelCase = f'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
__UpperCamelCase = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
model.eval()
__UpperCamelCase = 0
__UpperCamelCase = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
__UpperCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
__UpperCamelCase = (batch['image'] - mean) / std
with torch.no_grad():
__UpperCamelCase = model(_lowercase )
__UpperCamelCase = outputs.argmax(dim=-1 )
__UpperCamelCase, __UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['label']) )
__UpperCamelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
__UpperCamelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}: {1_00 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 1_00 * eval_metric,
'train_loss': total_loss.item() / len(_lowercase ),
'epoch': epoch,
} , step=_lowercase , )
if checkpointing_steps == "epoch":
__UpperCamelCase = f'''epoch_{epoch}'''
if args.output_dir is not None:
__UpperCamelCase = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
if args.with_tracking:
accelerator.end_training()
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
parser.add_argument('--data_dir' , required=_lowercase , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=_lowercase , default=_lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=_lowercase , default=_lowercase , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=_lowercase , default=_lowercase , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=_lowercase , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config, args)
if __name__ == "__main__":
main()
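# Example invocation (illustrative; the script name and data path are placeholders):
# accelerate launch cv_example.py --data_dir ./oxford_pets_images --mixed_precision fp16 --with_tracking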
| 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] ,A : Union[str, Any] ,A : List[Any]=13 ,A : Optional[Any]=30 ,A : Union[str, Any]=2 ,A : Union[str, Any]=3 ,A : Any=True ,A : Dict=True ,A : str=32 ,A : Tuple=2 ,A : Optional[int]=4 ,A : Tuple=37 ,A : List[Any]="gelu" ,A : Dict=0.1 ,A : Optional[int]=0.1 ,A : List[Any]=10 ,A : Optional[Any]=0.02 ,A : Dict=3 ,A : Dict=None ,A : List[Any]=2 ,):
__A = parent
__A = batch_size
__A = image_size
__A = patch_size
__A = num_channels
__A = is_training
__A = use_labels
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = type_sequence_label_size
__A = initializer_range
__A = scope
__A = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__A = (image_size // patch_size) ** 2
__A = num_patches + 2
def UpperCamelCase_ ( self : List[Any] ):
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__A = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : Optional[int] ):
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,A : Optional[int] ,A : Union[str, Any] ):
__A = TFDeiTModel(config=A )
__A = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : List[Any] ,A : List[Any] ,A : Optional[Any] ,A : Dict ):
__A = TFDeiTForMaskedImageModeling(config=A )
__A = model(A )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__A = 1
__A = TFDeiTForMaskedImageModeling(A )
__A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A = model(A )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase_ ( self : Optional[Any] ,A : Union[str, Any] ,A : Dict ,A : Union[str, Any] ):
__A = self.type_sequence_label_size
__A = TFDeiTForImageClassification(A )
__A = model(A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__A = 1
__A = TFDeiTForImageClassification(A )
__A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A = model(A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : str ):
__A = self.prepare_config_and_inputs()
__A , __A , __A = config_and_inputs
__A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def UpperCamelCase_ ( self : str ):
__A = TFDeiTModelTester(self )
__A = ConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 )
def UpperCamelCase_ ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
def UpperCamelCase_ ( self : List[Any] ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(A )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
__A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A ,tf.keras.layers.Dense ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(A )
__A = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,A )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self : Optional[int] ,A : Union[str, Any] ,A : List[str] ,A : Optional[Any]=False ):
__A = super()._prepare_for_class(A ,A ,return_labels=A )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def UpperCamelCase_ ( self : Any ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = TFDeiTModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase ( ) -> str:
"""simple docstring"""
__A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : int ):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : Optional[int] ):
__A = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="tf" )
# forward pass
__A = model(**A )
# verify the logits
__A = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape ,A )
__A = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
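# Condensed usage sketch (illustrative) of the checkpoint exercised above:
# processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
# model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
# logits = model(**processor(images=image, return_tensors="tf")).logits  # shape (1, 1000)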
| 55 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : str = "realm"
def __init__( self : List[Any] , __lowerCAmelCase : List[Any]=3_05_22 , __lowerCAmelCase : List[str]=7_68 , __lowerCAmelCase : str=1_28 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Tuple=30_72 , __lowerCAmelCase : Optional[int]="gelu_new" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : str=2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : str=1E-12 , __lowerCAmelCase : Tuple=2_56 , __lowerCAmelCase : List[str]=10 , __lowerCAmelCase : Union[str, Any]=1E-3 , __lowerCAmelCase : Optional[Any]=5 , __lowerCAmelCase : List[Any]=3_20 , __lowerCAmelCase : Any=13_35_37_18 , __lowerCAmelCase : Tuple=50_00 , __lowerCAmelCase : str=1 , __lowerCAmelCase : List[str]=0 , __lowerCAmelCase : Union[str, Any]=2 , **__lowerCAmelCase : Dict , ) -> int:
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
# Common config
_A = vocab_size
_A = max_position_embeddings
_A = hidden_size
_A = retriever_proj_size
_A = num_hidden_layers
_A = num_attention_heads
_A = num_candidates
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = initializer_range
_A = type_vocab_size
_A = layer_norm_eps
# Reader config
_A = span_hidden_size
_A = max_span_width
_A = reader_layer_norm_eps
_A = reader_beam_size
_A = reader_seq_len
# Retrieval config
_A = num_block_records
_A = searcher_beam_size
| 2 |
SCREAMING_SNAKE_CASE :List[Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE :Union[str, Any] = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE :int = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """simple docstring"""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
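    # Example (illustrative): 2012-02-29 fell on a Wednesday.
    print(get_week_day(2012, 2, 29))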
| 55 | 0 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
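# Illustrative check: least_divisible_repunit(7) == 6, since R(6) = 111111
# = 7 * 15873 is the first repunit divisible by 7.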
if __name__ == "__main__":
print(f"""{solution() = }""")
| 3 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCAmelCase ( a_ = "isbn/0140328726" ) -> dict:
"""simple docstring"""
__A = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
__A = F'''{olid} is not a valid Open Library olid'''
raise ValueError(a_ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def summarize_book(ol_book_data: dict) -> dict:
    """simple docstring"""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
SCREAMING_SNAKE_CASE :int = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
SCREAMING_SNAKE_CASE :Any = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 55 | 0 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class a ( a__ , a__ , a__ , unittest.TestCase ):
    snake_case__ = StableDiffusionControlNetImg2ImgPipeline
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
        lowerCAmelCase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCAmelCase = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCAmelCase = CLIPTextModel(_snake_case )
lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = 2
lowerCAmelCase = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , )
lowerCAmelCase = floats_tensor(control_image.shape , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase = Image.fromarray(np.uint8(_snake_case ) ).convert('RGB' ).resize((64, 64) )
lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class a ( a__ , a__ , unittest.TestCase ):
    snake_case__ = StableDiffusionControlNetImg2ImgPipeline
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
        controlneta = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnetb = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnetb.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        controlnet = MultiControlNetModel([controlneta, controlnetb] )
        components = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_control_guidance_switch( self ):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_a = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_b = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_c = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_d = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_b ) ) > 1E-3
        assert np.sum(np.abs(output_a - output_c ) ) > 1E-3
        assert np.sum(np.abs(output_a - output_d ) ) > 1E-3
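    # Note: control_guidance_start / control_guidance_end restrict ControlNet
    # conditioning to a fraction of the denoising schedule; passing lists gives
    # each ControlNet in a MultiControlNet its own active interval, which is
    # why the four outputs above are expected to differ.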
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
    def test_save_pretrained_raise_not_implemented_exception( self ):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_canny( self ):
        """simple docstring"""
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((5_12, 5_12) )
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((5_12, 5_12) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='np' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
        assert np.abs(expected_image - image ).max() < 9E-2
| 4 |
import requests
SCREAMING_SNAKE_CASE :List[str] = 'YOUR API KEY'
def get_gifs(query , api_key = giphy_api_key ) -> list:
    """simple docstring"""
    formatted_query = "+".join(query.split() )
    url = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
    gifs = requests.get(url ).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 55 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = '''Wav2Vec2FeatureExtractor'''
    tokenizer_class = '''AutoTokenizer'''
    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                F'Loading a tokenizer inside {cls.__name__} from a config that does not'
                """ include a `tokenizer_class` attribute is deprecated and will be """
                """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
                """ attribute to either your `config.json` or `tokenizer_config.json` """
                """file to suppress this warning: """ , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        # When used inside `as_target_processor`, forward everything to the tokenizer.
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            audio = kwargs.pop("""raw_speech""" )
        else:
            audio = kwargs.pop("""audio""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def pad( self , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop("""input_features""" , None )
        labels = kwargs.pop("""labels""" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["""labels"""] = labels["""input_ids"""]
            return input_features
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        """simple docstring"""
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
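# Minimal usage sketch (the checkpoint name is illustrative, not part of this module):
#   processor = WavaVecaProcessor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=raw_speech, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD").input_ids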
| 5 |
import itertools
import math
def is_prime(number ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(nth = 1_0_0_0_1 ) -> int:
    """simple docstring"""
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
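# Illustrative check (the well-known Project Euler 7 result): the 10001st
# prime is 104743, so solution() == 104_743.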
if __name__ == "__main__":
print(f'''{solution() = }''')
| 55 | 0 |
def binary_multiply(a: int , b: int ):
    # Russian peasant multiplication: add `a` whenever the low bit of `b`
    # is set, then double `a` and halve `b`.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply(a: int , b: int , c: int ):
    # Same scheme, but every accumulation is reduced modulo `c`, so the
    # intermediate values stay bounded by `c`.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
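# A minimal usage sketch (the function names above are descriptive stand-ins
# for the original placeholders): the modular variant agrees with the direct
# product reduced mod c, without ever forming the full product.
if __name__ == "__main__":
    assert binary_multiply(13 , 11 ) == 143
    assert binary_mod_multiply(13 , 11 , 7 ) == (13 * 11) % 7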
| 6 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[str]:
"""simple docstring"""
__A = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
__A = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(a_ ):
os.makedirs(a_ )
__A = model.state_dict()
def to_tf_var_name(a_ ):
for patt, repl in iter(a_ ):
__A = name.replace(a_ , a_ )
return F'''bert/{name}'''
def create_tf_var(a_ , a_ , a_ ):
__A = tf.dtypes.as_dtype(tensor.dtype )
__A = tf.get_variable(dtype=a_ , shape=tensor.shape , name=a_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(a_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__A = to_tf_var_name(a_ )
__A = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__A = torch_tensor.T
__A = create_tf_var(tensor=a_ , name=a_ , session=a_ )
tf.keras.backend.set_value(a_ , a_ )
__A = session.run(a_ )
print(F'''Successfully created {tf_name}: {np.allclose(a_ , a_ )}''' )
__A = tf.train.Saver(tf.trainable_variables() )
saver.save(a_ , os.path.join(a_ , model_name.replace("-" , "_" ) + ".ckpt" ) )
def UpperCAmelCase ( a_=None ) -> List[Any]:
"""simple docstring"""
__A = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=a_ , required=a_ , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=a_ , default=a_ , required=a_ , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=a_ , required=a_ , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=a_ , required=a_ , help="Directory in which to save tensorflow model" )
__A = parser.parse_args(a_ )
__A = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=a_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 55 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ['''PoolFormerFeatureExtractor''']
a = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 7 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE :Any = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55 | 0 |
'''simple docstring'''
def logical_left_shift(number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int , shift_amount: int ) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number ) ).strip('-' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
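# Quick sanity checks (illustrative, not part of the original module):
#   logical_left_shift(1, 1)       -> '0b10'    (1 << 1 == 2)
#   logical_right_shift(1, 1)      -> '0b0'
#   arithmetic_right_shift(-4, 1)  -> '0b1110'  (the sign bit is replicated)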
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
SCREAMING_SNAKE_CASE :int = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def UpperCAmelCase ( a_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
__A = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
__A = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
__A = job.find("span" , {"class": "company"} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 55 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = subprocess.check_output(f'git diff --name-only {fork_point_sha}'.split()).decode('''utf-8''').split()
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(rf'^({joined_dirs}).*?\.py$')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
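# e.g. invoked as `python utils/get_modified_files.py utils src`, the regex
# becomes r"^(utils|src).*?\.py$", so "src/foo.py" is reported while
# "docs/foo.py" is not.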
| 9 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
        processor = BlipaProcessor(image_processor ,tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self ,**kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname ,**kwargs ).tokenizer
    def get_image_processor( self ,**kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname ,**kwargs ).image_processor
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self ):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False ,padding_value=1.0 )
        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=False ,padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,BlipImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input ,return_tensors="np" )
        input_processor = processor(images=image_input ,return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str ,return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str ,images=image_input )
        self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok ,decoded_processor )
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str ,images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] )
| 55 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_A )
_UpperCamelCase = VersatileDiffusionPipeline.from_pretrained(_A , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = generator.manual_seed(0 )
_UpperCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = '''cyberpunk 2077'''
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe.dual_guided(
prompt=_A , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
_UpperCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_UpperCamelCase = '''A painting of a squirrel eating a burger '''
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe.text_to_image(
prompt=_A , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
_UpperCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_UpperCamelCase = pipe.image_variation(_A , generator=_A , output_type='''numpy''' ).images
_UpperCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 10 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
    '''simple docstring'''
    def __init__( self ,tokenizer ,dataset ,seq_length=10_24 ,num_of_sequences=10_24 ,chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            # Fill a character buffer large enough to yield several sequences.
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )["content"] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer ,truncation=False )["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 ,len(all_token_ids ) ,self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
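# Minimal usage sketch (the dataset name is illustrative): wrap a streaming
# text dataset so every yielded tensor holds exactly `seq_length` token ids.
#   data = load_dataset("lvwerra/codeparrot-clean-valid", split="train", streaming=True)
#   ds = ConstantLengthDataset(tokenizer, data, seq_length=1024)
#   batch = next(iter(DataLoader(ds, batch_size=8)))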
def create_dataloader(args ):
    """simple docstring"""
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name , split="train" , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate(args ):
    """simple docstring"""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float("inf" )
    return loss.item(), perplexity.item()
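# Note: perplexity here is exp(mean token-level cross-entropy); the
# OverflowError guard above maps an overflowing exp() to float("inf").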
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 55 | 0 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __A ( A ):
'''simple docstring'''
@require_torch
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(A )
BertModel.from_pretrained(A )
BertTokenizer.from_pretrained(A )
pipeline(task='''fill-mask''' , model=A )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def a__ (self ) -> Dict:
"""simple docstring"""
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(A )
BertModel.from_pretrained(A )
BertTokenizer.from_pretrained(A )
pipeline(task='''fill-mask''' , model=A )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = '''
from transformers import pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
_a = self.get_env()
_a = '''1'''
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = '''
from transformers import AutoModel
'''
_a = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 11 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = LayoutLMTokenizer
snake_case_ = LayoutLMTokenizerFast
snake_case_ = True
snake_case_ = True
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer( self ,**kwargs ):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens ,["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[7, 4, 5, 10, 8, 9] )
def UpperCamelCase_ ( self : int ):
pass
| 55 | 0 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
    # fairseq:
    """wmt19-ru-en""": {"""length_penalty""": 1.1},
    """wmt19-en-ru""": {"""length_penalty""": 1.15},
    """wmt19-en-de""": {"""length_penalty""": 1.0},
    """wmt19-de-en""": {"""length_penalty""": 1.1},
    # allenai:
    """wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
    """wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
    """wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
    """wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
    """wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = """facebook"""
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = """allenai"""
def rewrite_dict_keys(d ):
    '''simple docstring'''
    # (1) remove the word-breaking symbol "@@", (2) add the word-ending symbol
    # "</w>" where the word is not broken up
    da = dict((re.sub(R"""@@$""" , """""" , k ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , k ), v) for k, v in d.items() )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens
    for k in keep_keys:
        del da[F'{k}</w>']
        da[k] = d[k]  # restore
    return da
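# Illustrative mapping (assumed BPE input): {"le@@": 5, "tt@@": 6, "er": 7}
# becomes {"le": 5, "tt": 6, "er</w>": 7}, with <s>/<pad>/</s>/<unk> kept as-is.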
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    assert os.path.exists(fsmt_checkpoint_path )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(F'Writing results to {pytorch_dump_folder_path}' )
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path )
    fsmt_folder_path = dirname(fsmt_checkpoint_path )
lowercase__ : List[str] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowercase__ : int = cls.hub_models()
lowercase__ : Tuple = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
lowercase__ : Optional[Any] = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'using checkpoint {checkpoint_file}' )
lowercase__ : Optional[int] = hub_utils.from_pretrained(
lowercase_ , lowercase_ , lowercase_ , archive_map=lowercase_ , **lowercase_ )
lowercase__ : Tuple = vars(chkpt["""args"""]["""model"""] )
lowercase__ : List[Any] = args["""source_lang"""]
lowercase__ : Dict = args["""target_lang"""]
lowercase__ : Optional[Any] = dirname(lowercase_ )
lowercase__ : int = basename(lowercase_ )
# dicts
lowercase__ : Union[str, Any] = os.path.join(lowercase_ , F'dict.{src_lang}.txt' )
lowercase__ : Optional[int] = os.path.join(lowercase_ , F'dict.{tgt_lang}.txt' )
lowercase__ : Optional[int] = Dictionary.load(lowercase_ )
lowercase__ : Union[str, Any] = rewrite_dict_keys(src_dict.indices )
lowercase__ : str = len(lowercase_ )
lowercase__ : Any = os.path.join(lowercase_ , """vocab-src.json""" )
print(F'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowercase__ : Any = True
for k in src_vocab.keys():
if not k.islower():
lowercase__ : List[str] = False
break
lowercase__ : Tuple = Dictionary.load(lowercase_ )
lowercase__ : Any = rewrite_dict_keys(tgt_dict.indices )
lowercase__ : Any = len(lowercase_ )
lowercase__ : Any = os.path.join(lowercase_ , """vocab-tgt.json""" )
print(F'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) )
# merges_file (bpecodes)
lowercase__ : Union[str, Any] = os.path.join(lowercase_ , VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowercase__ : Optional[int] = os.path.join(lowercase_ , lowercase_ )
if os.path.exists(lowercase_ ):
break
with open(lowercase_ , encoding="""utf-8""" ) as fin:
lowercase__ : List[Any] = fin.read()
lowercase__ : Optional[Any] = re.sub(R""" \d+$""" , """""" , lowercase_ , 0 , re.M ) # remove frequency number
print(F'Generating {merges_file}' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as fout:
fout.write(lowercase_ )
# model config
lowercase__ : Tuple = os.path.join(lowercase_ , """config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", F'need to extend tokenizer to support bpe={args["tokenizer"]}'
lowercase__ : List[str] = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
lowercase__ : Optional[int] = 5
lowercase__ : List[str] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowercase__ : Optional[int] = best_score_hparams[model_dir]["""length_penalty"""]
else:
lowercase__ : Union[str, Any] = 1.0
print(F'Generating {fsmt_model_config_file}' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) )
# tokenizer config
lowercase__ : Optional[int] = os.path.join(lowercase_ , lowercase_ )
lowercase__ : int = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 10_24,
"""do_lower_case""": do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) )
# model
lowercase__ : Dict = chkpt["""models"""][0]
lowercase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowercase__ : Any = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowercase__ : List[Any] = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(lowercase_ , lowercase_ )
lowercase__ : str = FSMTConfig.from_pretrained(lowercase_ )
lowercase__ : List[str] = FSMTForConditionalGeneration(lowercase_ )
# check that it loads ok
model_new.load_state_dict(lowercase_ , strict=lowercase_ )
# save
lowercase__ : str = os.path.join(lowercase_ , lowercase_ )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase_ , lowercase_ )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(F'cd {data_root}' )
print(F'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase__ : List[str] = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 12 |
SCREAMING_SNAKE_CASE :int = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number ) -> int:
    """simple docstring"""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
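# e.g. 4150 = 4**5 + 1**5 + 5**5 + 0**5, so digits_fifth_powers_sum(4150) == 4150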
def solution() -> int:
    """simple docstring"""
    return sum(
        number
        for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
| 55 | 0 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
A__ : Optional[int] = """bart"""
A__ : Optional[int] = True
@st.cache(allow_output_mutation=True )
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
        _ = qar_model.eval()
    else:
        qar_tokenizer , qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
        sas_model = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
        sas_model.load_state_dict(save_dict['model'] )
        _ = sas_model.eval()
    else:
        sas_tokenizer , sas_model = make_qa_sas_model(
            model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
        wikiaab_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 1_28) , )
        wikiaab_index_flat = faiss.IndexFlatIP(1_28 )
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wikiaab_index_flat )
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps )  # TODO fix for larger GPU
    else:
        wikiaab_passages , wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
    elia = datasets.load_dataset('eli5' , name='LFQA_reddit' )
    elia_train = elia['train_eli5']
    elia_train_q_reps = np.memmap(
        'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 1_28) )
    eli5_train_q_index = faiss.IndexFlatIP(1_28 )
    eli5_train_q_index.add(elia_train_q_reps )
    return (elia_train, eli5_train_q_index)
wikiaab_passages , wikiaab_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
elia_train , eli5_train_q_index = load_train_data()
def find_nearest_training(question , n_results=10 ):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D , I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [elia_train[int(i )] for i in I[0]]
    return nn_examples
def make_support(question , source="wiki40b" , method="dense" , n_results=10 ):
    if source == "none":
        support_doc , hit_lst = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc , hit_lst = query_qa_dense_index(
                question , qar_tokenizer , qar_model , wikiaab_passages , wikiaab_gpu_index_flat , n_results )
        else:
            support_doc , hit_lst = query_es_index(
                question , es_client , index_name='english_wiki40b_snippets_100w' , n_results=n_results , )
    support_list = [
        (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
    ]
    question_doc = 'question: {} context: {}'.format(question , support_doc )
    return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCAmelCase_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCAmelCase_ : None),
} )
def answer_question(
    question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=2_56 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=10_24 , device='cuda:0' , )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
A__ : Optional[Any] = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
A__ : List[Any] = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
A__ : Any = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
A__ : Optional[Any] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
A__ : List[str] = st.sidebar.checkbox("""Demo options""")
if demo_options:
A__ : List[Any] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
A__ : str = action_list.index(action_st)
A__ : str = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
A__ : List[str] = show_type == """Show full text of passages"""
else:
A__ : Optional[Any] = 3
A__ : Optional[int] = True
A__ : Optional[int] = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
A__ : Optional[int] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
A__ : List[str] = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
A__ : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
A__ : Union[str, Any] = """wiki40b"""
A__ : Union[str, Any] = """dense"""
A__ : Dict = """beam"""
A__ : Dict = 2
A__ : List[Any] = 64
A__ : Dict = 256
A__ : Dict = None
A__ : Optional[int] = None
A__ : List[str] = st.sidebar.checkbox("""Generation options""")
if generate_options:
A__ : Any = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can generate answers with
    **beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
A__ : Dict = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
A__ : int = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
A__ : Optional[int] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
A__ : List[Any] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
A__ : Any = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
A__ : int = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
A__ : int = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("""Enter your question here:""", """""")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="""dense""", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
            support_list = []
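            # interleave dense and sparse hits, skipping duplicates (truncated to the top 10 below)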
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
A__ : List[Any] = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
A__ : int = res[1].strip()
if sec_titles == "":
A__ : int = """[{}]({})""".format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(""" & """)
                sections = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
        answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
A__ : Optional[Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
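# To try the demo locally (assuming this file is saved as eli5_app.py and the models
# and indexes referenced above are available): `streamlit run eli5_app.py`.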
| 13 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
'''simple docstring'''
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileNetVaConfig(
num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,min_depth=self.min_depth ,tf_padding=self.tf_padding ,hidden_act=self.hidden_act ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def UpperCamelCase_ ( self : Any ):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            # one hidden state per convolutional layer of the MobileNetV1 backbone
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits: 1000 ImageNet classes plus one background class
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 55 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mobilebert_fast'''] = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilebert'''] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mobilebert'''] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
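# A quick sketch of how the lazy module above behaves at runtime (hypothetical usage):
#
#   from transformers.models.mobilebert import MobileBertModel
#
# only materializes `modeling_mobilebert` on first attribute access, while static type
# checkers follow the eager imports in the `TYPE_CHECKING` branch instead.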
| 14 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
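        # e.g. with image_size=224 and patch_size=16: (224 // 16) ** 2 + 1 = 197 visual tokens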
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        # after the swaps, every box satisfies x0 <= x1 and y0 <= y1

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )
        # verify the last hidden states: 199 = 2 text tokens + (224 // 16) ** 2 patch tokens + 1 CLS token
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 55 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
A : Tuple = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
'''simple docstring'''
    model_input_names = ['''pixel_values''']
    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        """simple docstring"""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="""symmetric""", data_format=data_format)
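    # Note the quirk of the padding rule above: a dimension that is already a multiple of
    # `size` still gains one full extra block, e.g. old_height=128, size=8 pads by
    # (128 // 8 + 1) * 8 - 128 = 8 rows, while old_height=129 pads by 7 rows up to 136.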
    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """simple docstring"""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 15 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
"""simple docstring"""
__A = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
__A = Image.open(dataset[0]["file"] )
__A = Image.open(dataset[1]["file"] )
return image, map
def prepare_semantic_batch_inputs():
"""simple docstring"""
__A = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
__A = Image.open(ds[0]["file"] )
__A = Image.open(ds[1]["file"] )
__A = Image.open(ds[2]["file"] )
__A = Image.open(ds[3]["file"] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : List[str] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
for image in image_inputs:
self.assertIsInstance(A ,np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : int ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : str ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
__A = []
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__A = image_processing(image_inputs[0] ,maps[0] ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test not batched input (PIL images)
__A , __A = prepare_semantic_single_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched input (PIL images)
__A , __A = prepare_semantic_batch_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
def UpperCamelCase_ ( self : Dict ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__A , __A = prepare_semantic_single_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 1_50 )
        image_processing.do_reduce_labels = True
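        # with do_reduce_labels, the background class (0) is remapped to the ignore index 255,
        # so the maximum label value can now reach 255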
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
| 55 | 0 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}

def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))

def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number) )
if __name__ == "__main__":
print(solution())
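# Project Euler problem 30: the only numbers expressible as the sum of the fifth powers of
# their digits are 4150, 4151, 54748, 92727, 93084 and 194979, so solution() returns 443839.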
| 16 |
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
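# Quick sanity check (not part of the original module): at the mean of a standard
# normal, gaussian(0.0) == 1 / sqrt(2 * pi) ≈ 0.3989422804014327.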
| 55 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class MaMaaaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : Dict , __A : Optional[Any] , __A : Any , __A : Tuple=None , __A : List[str]=None , __A : int="<s>" , __A : Union[str, Any]="</s>" , __A : Optional[Any]="</s>" , __A : Tuple="<pad>" , __A : List[str]="<unk>" , __A : Optional[Any]="m2m100" , __A : Optional[Dict[str, Any]] = None , __A : Any=8 , **__A : List[str] , ):
__A : int = {} if sp_model_kwargs is None else sp_model_kwargs
__A : List[Any] = language_codes
__A : List[str] = FAIRSEQ_LANGUAGE_CODES[language_codes]
__A : str = {lang_code: F"""__{lang_code}__""" for lang_code in fairseq_language_code}
__A : Optional[int] = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__A )
for lang_code in fairseq_language_code
if self.get_lang_token(__A ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__A , tgt_lang=__A , bos_token=__A , eos_token=__A , sep_token=__A , unk_token=__A , pad_token=__A , language_codes=__A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__A , **__A , )
__A : Any = vocab_file
__A : Any = load_json(__A )
__A : Union[str, Any] = {v: k for k, v in self.encoder.items()}
__A : Optional[int] = spm_file
__A : Dict = load_spm(__A , self.sp_model_kwargs )
__A : int = len(self.encoder )
__A : Optional[Any] = {
self.get_lang_token(__A ): self.encoder_size + i for i, lang_code in enumerate(__A )
}
__A : Any = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__A )}
__A : Union[str, Any] = {v: k for k, v in self.lang_token_to_id.items()}
__A : Tuple = src_lang if src_lang is not None else """en"""
__A : Dict = tgt_lang
__A : List[Any] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__A : str = num_madeup_words
@property
def lowerCAmelCase_ ( self : Any ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def lowerCAmelCase_ ( self : int ):
return self._src_lang
@src_lang.setter
def lowerCAmelCase_ ( self : int , __A : str ):
__A : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase_ ( self : str , __A : str ):
return self.sp_model.encode(__A , out_type=__A )
def lowerCAmelCase_ ( self : str , __A : Union[str, Any] ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__A , self.encoder[self.unk_token] )
def lowerCAmelCase_ ( self : int , __A : int ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__A , self.unk_token )
def lowerCAmelCase_ ( self : Tuple , __A : Optional[Any] ):
__A : Dict = []
__A : Tuple = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__A ) + token
__A : Dict = []
else:
current_sub_tokens.append(__A )
out_string += self.sp_model.decode(__A )
return out_string.strip()
def lowerCAmelCase_ ( self : List[str] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
__A : Optional[Any] = [1] * len(self.prefix_tokens )
__A : Optional[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__A )) + suffix_ones
return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones
def lowerCAmelCase_ ( self : Optional[int] , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase_ ( self : List[str] ):
__A : Tuple = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
__A : List[str] = self.__dict__.copy()
__A : Union[str, Any] = None
return state
def __setstate__( self : Dict , __A : Dict ):
__A : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__A : Union[str, Any] = {}
__A : str = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCAmelCase_ ( self : str , __A : str , __A : Optional[str] = None ):
__A : str = Path(__A )
if not save_dir.is_dir():
raise OSError(F"""{save_directory} should be a directory""" )
__A : str = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
__A : Dict = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , __A )
if os.path.abspath(self.spm_file ) != os.path.abspath(__A ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __A )
elif not os.path.isfile(self.spm_file ):
with open(__A , """wb""" ) as fi:
__A : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (str(__A ), str(__A ))
def lowerCAmelCase_ ( self : Union[str, Any] , __A : List[str] , __A : str = "en" , __A : Optional[List[str]] = None , __A : str = "ro" , **__A : Optional[int] , ):
__A : Any = src_lang
__A : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__A , __A , **__A )
def lowerCAmelCase_ ( self : int , __A : Dict , __A : Optional[str] , __A : Optional[str] , **__A : List[str] ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__A : List[Any] = src_lang
__A : str = self(__A , add_special_tokens=__A , **__A )
__A : Optional[int] = self.get_lang_id(__A )
__A : Optional[Any] = tgt_lang_id
return inputs
def lowerCAmelCase_ ( self : List[str] ):
self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase_ ( self : Optional[Any] ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase_ ( self : Dict , __A : str ):
__A : Any = self.get_lang_token(__A )
__A : Any = self.lang_token_to_id[lang_token]
__A : Any = [self.cur_lang_id]
__A : Optional[Any] = [self.eos_token_id]
def lowerCAmelCase_ ( self : int , __A : str ):
__A : Tuple = self.get_lang_token(__A )
__A : Dict = self.lang_token_to_id[lang_token]
__A : Union[str, Any] = [self.cur_lang_id]
__A : str = [self.eos_token_id]
def lowerCAmelCase_ ( self : Tuple , __A : str ):
return self.lang_code_to_token[lang]
def lowerCAmelCase_ ( self : str , __A : str ):
__A : List[Any] = self.get_lang_token(__A )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm

def load_json(path: str) -> Union[Dict, List]:
    with open(path, """r""" ) as f:
        return json.load(f)

def save_json(data, path: str) -> None:
    with open(path, """w""" ) as f:
        json.dump(data, f, indent=2)
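# Hypothetical round-trip with the helpers above (file paths are placeholders):
#
#   save_json({"<s>": 0}, "vocab.json")
#   assert load_json("vocab.json") == {"<s>": 0}
#   spm = load_spm("sentencepiece.bpe.model", {})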
| 17 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True)
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
        print(f'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
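# With jit=True the pipeline runs the generation step under pmap, so every array above
# carries a leading device axis of size jax.device_count(); `shard` splits the batch
# across that axis while `replicate` copies the parameters to every device.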
| 55 | 0 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_SCREAMING_SNAKE_CASE = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_SCREAMING_SNAKE_CASE = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
| 18 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
def UpperCamelCase_ ( self : str ):
pass
def UpperCamelCase_ ( self : Dict ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def UpperCamelCase_ ( self : Optional[Any] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
for image in image_inputs:
self.assertIsInstance(A ,np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def UpperCamelCase_ ( self : int ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
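

# Illustration only (our addition): GLPN rounds each spatial dimension *down*
# to a multiple of `size_divisor`, which is why every test above asserts
# `shape % size_divisor == 0`. Hedged sketch of that rule; the real logic
# lives in GLPNImageProcessor.resize.
def round_down_to_multiple(height, width, size_divisor=32):
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor


assert round_down_to_multiple(400, 640) == (384, 640)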
| 55 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed containers (DDP/DataParallel/DeepSpeed/compiled)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk, using xm.save on TPU and torch.save on the main process otherwise."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (upper-cased keys) inside the context."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a readable name for an object, class, or function."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return it."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port=None) -> bool:
    """Check whether a local TCP port (default 29500) is already in use."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
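

# Usage sketch (our addition) for patch_environment as defined above: keys are
# upper-cased on the way into os.environ and removed again when the block
# exits (assumes MASTER_ADDR was not already set).
if __name__ == "__main__":
    with patch_environment(master_addr="127.0.0.1", master_port="29501"):
        assert os.environ["MASTER_ADDR"] == "127.0.0.1"
    assert "MASTER_ADDR" not in os.environ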
| 19 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so update the schema through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
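

# Usage sketch (our addition, illustration only): aligning the template
# replaces the ClassLabel placeholder in `label_schema` with the dataset's
# concrete labels.
if __name__ == "__main__":
    features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
    aligned = ImageClassification().align_with_features(features)
    assert aligned.label_schema["labels"].names == ["cat", "dog"]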
| 55 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim))
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4))
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
def __UpperCamelCase ( self) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowercase_)
@unittest.skip(reason='Mask2Former does not use inputs_embeds')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method')
def __UpperCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason='Mask2Former is not a generative model')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings')
def __UpperCamelCase ( self) -> Tuple:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> str:
pass
def __UpperCamelCase ( self) -> Optional[Any]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
@slow
def __UpperCamelCase ( self) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
a__ =MaskaFormerModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def __UpperCamelCase ( self) -> Dict:
a__ =(self.model_tester.min_size,) * 2
a__ ={
'pixel_values': torch.randn((2, 3, *size) , device=lowercase_),
'mask_labels': torch.randn((2, 10, *size) , device=lowercase_),
'class_labels': torch.zeros(2 , 10 , device=lowercase_).long(),
}
a__ =self.model_tester.get_config()
a__ =MaskaFormerForUniversalSegmentation(lowercase_).to(lowercase_)
a__ =model(**lowercase_)
self.assertTrue(outputs.loss is not None)
def __UpperCamelCase ( self) -> Tuple:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_)
def __UpperCamelCase ( self) -> int:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_).to(lowercase_)
a__ =model(**lowercase_ , output_attentions=lowercase_)
self.assertTrue(outputs.attentions is not None)
def __UpperCamelCase ( self) -> Union[str, Any]:
if not self.model_tester.is_training:
return
a__ =self.all_model_classes[1]
a__ , a__ , a__ , a__ , a__ =self.model_tester.prepare_config_and_inputs()
a__ =model_class(lowercase_)
model.to(lowercase_)
model.train()
a__ =model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_).loss
loss.backward()
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =self.all_model_classes[1]
a__ , a__ , a__ , a__ , a__ =self.model_tester.prepare_config_and_inputs()
a__ =True
a__ =True
a__ =model_class(lowercase_).to(lowercase_)
model.train()
a__ =model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_)
a__ =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a__ =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
a__ =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a__ =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowercase_)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
_lowerCAmelCase: str = 1e-4
def _lowercase( ):
a__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowercase_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self) -> Tuple:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __UpperCamelCase ( self) -> Optional[Any]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def __UpperCamelCase ( self) -> str:
a__ =MaskaFormerModel.from_pretrained(self.model_checkpoints).to(lowercase_)
a__ =self.default_image_processor
a__ =prepare_img()
a__ =image_processor(lowercase_ , return_tensors='pt').to(lowercase_)
a__ =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowercase_ , (1, 3, 384, 384))
with torch.no_grad():
a__ =model(**lowercase_)
a__ =torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(lowercase_)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_))
a__ =torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(lowercase_)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_))
a__ =torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(lowercase_)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase_ , atol=lowercase_))
def __UpperCamelCase ( self) -> Any:
a__ =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(lowercase_).eval()
a__ =self.default_image_processor
a__ =prepare_img()
a__ =image_processor(lowercase_ , return_tensors='pt').to(lowercase_)
a__ =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowercase_ , (1, 3, 384, 384))
with torch.no_grad():
a__ =model(**lowercase_)
# masks_queries_logits
a__ =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
a__ =[
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
a__ =torch.tensor(lowercase_).to(lowercase_)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_))
# class_queries_logits
a__ =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1))
a__ =torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_))
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(lowercase_).eval()
a__ =self.default_image_processor
a__ =image_processor(
[np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='pt' , )
a__ =inputs['pixel_values'].to(lowercase_)
a__ =[el.to(lowercase_) for el in inputs['mask_labels']]
a__ =[el.to(lowercase_) for el in inputs['class_labels']]
with torch.no_grad():
a__ =model(**lowercase_)
self.assertTrue(outputs.loss is not None)
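

# Illustration only (our addition): the shape bookkeeping the assertions above
# rely on, written out as plain arithmetic -- mask logits are predicted at 1/4
# of the input resolution and class logits carry one extra "null" class.
def expected_logit_shapes(batch, num_queries, num_labels, height, width):
    masks = (batch, num_queries, height // 4, width // 4)
    classes = (batch, num_queries, num_labels + 1)
    return masks, classes


assert expected_logit_shapes(1, 100, 80, 384, 384) == ((1, 100, 96, 96), (1, 100, 81))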
| 20 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Returns True if 'number' is prime, otherwise False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    """Sieve of Eratosthenes: returns all primes from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    """Returns all primes between 2 and n using trial division."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    """Returns the prime factorization of 'number' as a list."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    """Returns the greatest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    """Returns the smallest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    """Returns True if 'number' is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number):
    """Returns True if 'number' is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number):
    """Goldbach's assumption: returns two primes whose sum equals the even 'number'."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1, number2):
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


def kg_v(number1, number2):
    """Least common multiple (kgV) of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    """Returns the n-th prime number (counting from get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    """Returns all primes strictly between the primes p_number_1 and p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """Returns all divisors of n (inclusive 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    """Returns True if the sum of number's proper divisors equals the number."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Reduces numerator/denominator by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """Returns n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    """Returns the n-th Fibonacci number (fib(0) == fib(1) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
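

# Quick self-checks (our addition) exercising the helpers above.
if __name__ == "__main__":
    assert [is_prime(k) for k in (2, 3, 4)] == [True, True, False]
    assert goldbach(28) == [5, 23]
    assert gcd(24, 36) == 12
    assert kg_v(4, 6) == 12
    assert simplify_fraction(10, 20) == (1, 2)
    assert fib(10) == 89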
| 55 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()
device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 21 |
import os
def solution() -> int:
    """Project Euler triangle problem: maximum top-to-bottom path sum in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    # bottom-up accumulation: each cell takes the better of its two parents
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
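
# Illustration only (our addition): the same bottom-up recurrence on an inline
# triangle, so the logic is testable without the 'triangle.txt' input file.
def max_path_sum(rows):
    rows = [row[:] for row in rows]
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            up_right = rows[i - 1][j] if j != len(rows[i - 1]) else 0
            up_left = rows[i - 1][j - 1] if j > 0 else 0
            rows[i][j] += max(up_right, up_left)
    return max(rows[-1])


assert max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23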
| 55 | 0 |
"""Build and simulate a quantum full adder with qiskit."""
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Simulates a quantum full adder; inputs 0/1 are basis states, 2 means superposition."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
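
# Illustration only (our addition): a classical full adder for cross-checking
# the quantum circuit's deterministic cases (inputs 0/1; an input value of 2
# means superposition and has no classical analogue).
def classical_full_adder(input_1, input_2, carry_in):
    total = input_1 + input_2 + carry_in
    return total % 2, total // 2  # (sum bit, carry-out bit)


assert classical_full_adder(1, 1, 1) == (1, 1)  # matches the '11' basis state above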
| 22 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs match any contiguous window of strings in ks."""
    # compile each pattern and anchor it so it must match a whole key segment
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
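

# Usage sketch (our addition): _match slides the compiled patterns over a
# flattened parameter path, so rules written for the tail of a key match
# nested GPT-style keys.
if __name__ == "__main__":
    assert _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
    assert not _match(("attention", "out_proj", "bias"), ("transformer", "ln_f", "bias"))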
| 55 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """A thin wrapper that only steps the scheduler when the optimizer actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
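

# Usage sketch (our addition; illustration only -- Accelerator.prepare
# normally builds this wrapper, and instantiating GradientState outside an
# Accelerator is assumed to work with its defaults here). With
# step_with_optimizer=False the wrapper just forwards to the torch scheduler.
if __name__ == "__main__":
    import torch

    param = torch.nn.Parameter(torch.zeros(1))
    opt = torch.optim.SGD([param], lr=0.1)
    sched = torch.optim.lr_scheduler.StepLR(opt, step_size=1, gamma=0.5)
    wrapped = AcceleratedScheduler(sched, optimizers=opt, step_with_optimizer=False)
    opt.step()
    wrapped.step()
    assert wrapped.get_last_lr() == [0.05]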
| 23 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
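

# Illustration only (our addition): DeiT's sequence length is the patch count
# plus the [CLS] and distillation tokens, mirroring the arithmetic in
# TFDeiTModelTester above.
def deit_seq_length(image_size, patch_size):
    return (image_size // patch_size) ** 2 + 2


assert deit_seq_length(image_size=30, patch_size=2) == 227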
| 55 | 0 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
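

# Illustration only (our addition): the partition-to-worker assignment the
# shard test above expects -- worker i takes every num_workers-th partition
# starting at i.
def partitions_for_worker(worker_id, num_workers, num_partitions):
    return list(range(worker_id, num_partitions, num_workers))


assert partitions_for_worker(0, 2, 4) == [0, 2]
assert partitions_for_worker(1, 2, 4) == [1, 3]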
| 24 |
SCREAMING_SNAKE_CASE :List[Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE :Union[str, Any] = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE :int = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date, using the Doomsday algorithm.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # a year is a leap year if divisible by 4, except century years not divisible by 400
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
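
# Worked example (our addition): get_week_day(2020, 10, 24)
# century_anchor = (5 * (20 % 4) + 2) % 7 = 2
# dooms_day = ((20 // 12) + 8 + (8 // 4) + 2) % 7 = 6
# 2020 is a leap year, so day_anchor = DOOMSDAY_LEAP[9] = 3
# week_day = (6 + 24 - 3) % 7 = 6 -> 'Saturday'
assert get_week_day(2020, 10, 24) == "Saturday"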
| 55 | 0 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowerCamelCase__ =(GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ =(
{'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = GPTNeoXJapaneseModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , hidden_size=37 )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(a , a , a )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
SCREAMING_SNAKE_CASE : Dict = None
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(a , a , a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*a )
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = "abeja/gpt-neox-japanese-2.7b"
SCREAMING_SNAKE_CASE : Optional[int] = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
SCREAMING_SNAKE_CASE : List[Any] = [
"データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
"100年後に必要とされる会社は、「人」が中心の会社です。",
"フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
"国境の長いトンネルを抜けると、そこは雪国だった。",
"美味しい日本食といえば、やっぱりお寿司ですよね。",
]
SCREAMING_SNAKE_CASE : List[str] = GPTNeoXJapaneseTokenizer.from_pretrained(a )
SCREAMING_SNAKE_CASE : Tuple = GPTNeoXJapaneseForCausalLM.from_pretrained(a )
SCREAMING_SNAKE_CASE : Optional[Any] = []
for prompt in prompts:
SCREAMING_SNAKE_CASE : str = tokenizer(a , return_tensors="pt" ).input_ids
SCREAMING_SNAKE_CASE : Any = model.generate(a , max_length=50 )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(a , skip_special_tokens=a )
predicted_outputs += generated_string
self.assertListEqual(a , a )
| 25 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCAmelCase ( a_ = "isbn/0140328726" ) -> dict:
"""simple docstring"""
__A = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
__A = F'''{olid} is not a valid Open Library olid'''
raise ValueError(a_ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def UpperCAmelCase ( a_ ) -> dict:
"""simple docstring"""
__A = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
__A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__A = [
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
__A = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(a_ , a_ ):
__A = ", ".join(a_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
SCREAMING_SNAKE_CASE :int = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
SCREAMING_SNAKE_CASE :Any = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 55 | 0 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
__UpperCamelCase = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
__UpperCamelCase = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
__UpperCamelCase = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : int=None , __magic_name__ : Dict=None , __magic_name__ : Union[str, Any]=False ) -> str:
"""simple docstring"""
if concatenate_texts:
return compute_measures(__magic_name__ , __magic_name__ )["wer"]
else:
__snake_case : Union[str, Any] = 0
__snake_case : Tuple = 0
for prediction, reference in zip(__magic_name__ , __magic_name__ ):
__snake_case : Dict = compute_measures(__magic_name__ , __magic_name__ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
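# A minimal sketch (not part of the metric above) of the WER formula from the
# description, WER = (S + D + I) / N, computed directly with
# jiwer.compute_measures; the helper name and example strings are illustrative.
def _wer_by_hand(prediction: str, reference: str) -> float:
    # compute_measures is already imported at the top of this file; jiwer
    # takes (truth, hypothesis) positionally.
    measures = compute_measures(reference, prediction)
    errors = measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total = measures["substitutions"] + measures["deletions"] + measures["hits"]
    return errors / total  # equals measures["wer"]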
| 26 |
import requests
SCREAMING_SNAKE_CASE :List[str] = 'YOUR API KEY'
def UpperCAmelCase ( a_ , a_ = giphy_api_key ) -> list:
"""simple docstring"""
__A = "+".join(query.split() )
__A = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
__A = requests.get(a_ ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 55 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A : Dict = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ["ConvNextFeatureExtractor"]
__A : Optional[Any] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
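# A short note on the lazy pattern above (assuming standard transformers
# _LazyModule behavior): nothing listed in _import_structure is imported at
# module load time; the real import runs on first attribute access, e.g.
#
#   from transformers.models.convnext import ConvNextConfig  # import happens here
#
# so optional backends (vision, torch, TF) are only required when actually used.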
| 27 |
import itertools
import math
def UpperCAmelCase ( a_ ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(a_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
__A = 2
while True:
if is_prime(a_ ):
yield num
num += 1
def UpperCAmelCase ( a_ = 1_0_0_0_1 ) -> int:
"""simple docstring"""
return next(itertools.islice(prime_generator() , nth - 1 , a_ ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
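# Worked check: the primes begin 2, 3, 5, 7, 11, 13, so solution(6) slices the
# generator at index 5 and returns 13; the default solution() yields the
# 10001st prime, as asked by Project Euler problem 7.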
| 55 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A, A ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=A, scheduler=A )
@torch.no_grad()
def __call__( self, A = 1, A = None, A = 50, A = "pil", A = True, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=A, )
SCREAMING_SNAKE_CASE : Dict = image.to(self.device )
# set step values
self.scheduler.set_timesteps(A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE : List[Any] = self.unet(A, A ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
SCREAMING_SNAKE_CASE : Tuple = self.scheduler.step(A, A, A ).prev_sample
SCREAMING_SNAKE_CASE : str = (image / 2 + 0.5).clamp(0, 1 )
SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Dict = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
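# For reference, a comparable sample-and-denoise loop through the public
# diffusers API (the checkpoint name is illustrative):
#
#   from diffusers import DDPMPipeline
#   pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
#   image = pipe(num_inference_steps=50).images[0]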
| 28 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[str]:
"""simple docstring"""
__A = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
__A = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(a_ ):
os.makedirs(a_ )
__A = model.state_dict()
def to_tf_var_name(a_ ):
for patt, repl in iter(a_ ):
__A = name.replace(a_ , a_ )
return F'''bert/{name}'''
def create_tf_var(a_ , a_ , a_ ):
__A = tf.dtypes.as_dtype(tensor.dtype )
__A = tf.get_variable(dtype=a_ , shape=tensor.shape , name=a_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(a_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__A = to_tf_var_name(a_ )
__A = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__A = torch_tensor.T
__A = create_tf_var(tensor=a_ , name=a_ , session=a_ )
tf.keras.backend.set_value(a_ , a_ )
__A = session.run(a_ )
print(F'''Successfully created {tf_name}: {np.allclose(a_ , a_ )}''' )
__A = tf.train.Saver(tf.trainable_variables() )
saver.save(a_ , os.path.join(a_ , model_name.replace("-" , "_" ) + ".ckpt" ) )
def UpperCAmelCase ( a_=None ) -> List[Any]:
"""simple docstring"""
__A = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=a_ , required=a_ , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=a_ , default=a_ , required=a_ , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=a_ , required=a_ , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=a_ , required=a_ , help="Directory in which to save tensorflow model" )
__A = parser.parse_args(a_ )
__A = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=a_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
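# Example invocation (script and file names are illustrative):
#
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt
#
# which saves bert_base_uncased.ckpt under ./tf_ckpt via tf.train.Saver.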
| 55 | 0 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return base * power(lowerCAmelCase__ ,(exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
A_ = int(input("""Enter the base: """).strip())
A_ = int(input("""Enter the exponent: """).strip())
A_ = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
A_ = 1 / result
print(f"{base} to the power of {exponent} is {result}")
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE :Any = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''megatron-bert'''
def __init__( self ,_SCREAMING_SNAKE_CASE=29_056 ,_SCREAMING_SNAKE_CASE=1_024 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=4_096 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-12 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE="absolute" ,_SCREAMING_SNAKE_CASE=True ,**_SCREAMING_SNAKE_CASE ,) -> Tuple:
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : Tuple = type_vocab_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Dict = layer_norm_eps
UpperCAmelCase_ : Dict = position_embedding_type
UpperCAmelCase_ : int = use_cache
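# A minimal sketch (in the public transformers API this class is
# MegatronBertConfig; the values checked are the defaults declared above):
#
#   config = MegatronBertConfig()
#   assert config.hidden_size == 1024 and config.num_hidden_layers == 24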
| 30 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE :int = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def UpperCAmelCase ( a_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
__A = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
__A = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
__A = job.find("span" , {"class": "company"} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 55 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ : Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Any=8 ) -> Dict:
SCREAMING_SNAKE_CASE_ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE_ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
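# Worked check for the helper above (known as downscale_height_and_width in
# the public diffusers source), with the default scale_factor=8:
# 512 // 8**2 == 8 with no remainder, so (512, 512) maps to
# (8 * 8, 8 * 8) == (64, 64), the latent resolution used to allocate
# `latents` in the pipeline below.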
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict , _lowerCAmelCase : UNetaDConditionModel , _lowerCAmelCase : DDPMScheduler , _lowerCAmelCase : VQModel , ):
super().__init__()
self.register_modules(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , movq=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
if latents is None:
SCREAMING_SNAKE_CASE_ = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
SCREAMING_SNAKE_CASE_ = latents.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : str=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE_ = torch.device(F"cuda:{gpu_id}" )
SCREAMING_SNAKE_CASE_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Any=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE_ = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cpu_offload_with_hook(_lowerCAmelCase , _lowerCAmelCase , prev_module_hook=_lowerCAmelCase )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase_ ( self : int ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__( self : Any , _lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowerCAmelCase : int = 512 , _lowerCAmelCase : int = 512 , _lowerCAmelCase : int = 100 , _lowerCAmelCase : float = 4.0 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase : Optional[torch.FloatTensor] = None , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , ):
SCREAMING_SNAKE_CASE_ = self._execution_device
SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = torch.cat(_lowerCAmelCase , dim=0 )
SCREAMING_SNAKE_CASE_ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = torch.cat(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
SCREAMING_SNAKE_CASE_ = negative_image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCAmelCase )
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps
SCREAMING_SNAKE_CASE_ = self.unet.config.in_channels
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = downscale_height_and_width(_lowerCAmelCase , _lowerCAmelCase , self.movq_scale_factor )
# create initial latent
SCREAMING_SNAKE_CASE_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_ = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE_ = self.unet(
sample=_lowerCAmelCase , timestep=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , added_cond_kwargs=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase , )[0]
# post-processing
SCREAMING_SNAKE_CASE_ = self.movq.decode(_lowerCAmelCase , force_not_quantize=_lowerCAmelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE_ = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE_ = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCAmelCase )
| 31 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : List[str] ):
__A = tempfile.mkdtemp()
__A = BlipImageProcessor()
__A = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
__A = BlipaProcessor(A ,A )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict ,**A : int ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**A ).tokenizer
def UpperCamelCase_ ( self : Dict ,**A : Optional[int] ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**A ).image_processor
def UpperCamelCase_ ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self : Optional[int] ):
__A = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
__A = [Image.fromarray(np.moveaxis(A ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self : Any ):
__A = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
__A = self.get_image_processor(do_normalize=A ,padding_value=1.0 )
__A = BlipaProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=A ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,A )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,A )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipaProcessor(tokenizer=A ,image_processor=A )
__A = self.prepare_image_inputs()
__A = image_processor(A ,return_tensors="np" )
__A = processor(images=A ,return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase_ ( self : Tuple ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipaProcessor(tokenizer=A ,image_processor=A )
__A = "lower newer"
__A = processor(text=A )
__A = tokenizer(A ,return_token_type_ids=A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase_ ( self : int ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipaProcessor(tokenizer=A ,image_processor=A )
__A = "lower newer"
__A = self.prepare_image_inputs()
__A = processor(text=A ,images=A )
self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipaProcessor(tokenizer=A ,image_processor=A )
__A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A = processor.batch_decode(A )
__A = tokenizer.batch_decode(A )
self.assertListEqual(A ,A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipaProcessor(tokenizer=A ,image_processor=A )
__A = "lower newer"
__A = self.prepare_image_inputs()
__A = processor(text=A ,images=A )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] )
| 55 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 32 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,A : Any ,A : List[str] ,A : Union[str, Any]=10_24 ,A : int=10_24 ,A : Optional[Any]=3.6 ):
__A = tokenizer
__A = tokenizer.bos_token_id
__A = dataset
__A = seq_length
__A = seq_length * chars_per_token * num_of_sequences
def __iter__( self : List[Any] ):
__A = iter(self.dataset )
__A = True
while more_examples:
__A , __A = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(A )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
__A = False
break
__A = tokenizer(A ,truncation=A )["input_ids"]
__A = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 ,len(A ) ,self.seq_length ):
__A = all_token_ids[i : i + self.seq_length]
if len(A ) == self.seq_length:
yield torch.tensor(A )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
__A = {"streaming": True}
__A = load_dataset(args.dataset_name , split="train" , **a_ )
__A = ConstantLengthDataset(a_ , a_ , seq_length=args.seq_length )
__A = DataLoader(a_ , batch_size=args.batch_size )
return eval_dataloader
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
model.eval()
__A = []
for step, batch in enumerate(a_ ):
with torch.no_grad():
__A = model(a_ , labels=a_ )
__A = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(a_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
__A = torch.mean(torch.cat(a_ ) )
try:
__A = torch.exp(a_ )
except OverflowError:
__A = float("inf" )
return loss.item(), perplexity.item()
# Setup Accelerator
SCREAMING_SNAKE_CASE :Optional[int] = Accelerator()
# Parse configuration
SCREAMING_SNAKE_CASE :str = HfArgumentParser(EvaluationArguments)
SCREAMING_SNAKE_CASE :int = parser.parse_args()
set_seed(args.seed)
# Logging
SCREAMING_SNAKE_CASE :Dict = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
SCREAMING_SNAKE_CASE :List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
SCREAMING_SNAKE_CASE :int = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
SCREAMING_SNAKE_CASE :List[str] = create_dataloader(args)
# Prepare everything with our `accelerator`.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :List[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :List[Any] = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
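# A minimal packing sketch (toy data; reuses the tokenizer loaded above, and
# the tiny seq_length is illustrative only):
#
#   toy = ConstantLengthDataset(tokenizer, [{"content": "def f(): pass"}] * 100, seq_length=32)
#   chunk = next(iter(toy))
#   assert chunk.shape == (32,)  # every yielded tensor is exactly seq_length tokens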
| 55 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def __init__( self:int , _a:Optional[int] , _a:List[Any]=7 , _a:Union[str, Any]=3 , _a:List[str]=18 , _a:Any=30 , _a:str=4_00 , _a:Optional[Any]=True , _a:List[Any]=None , _a:Optional[Any]=True , ):
snake_case__ = size if size is not None else {'''height''': 18, '''width''': 18}
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = image_size
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size
snake_case__ = do_normalize
def SCREAMING_SNAKE_CASE__ ( self:int ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : str = ImageGPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = ImageGPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''clusters''' ) )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
snake_case__ = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_a , obj[key] ) )
else:
self.assertEqual(obj[key] , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = os.path.join(_a , '''image_processor.json''' )
image_processor_first.to_json_file(_a )
snake_case__ = self.image_processing_class.from_json_file(_a ).to_dict()
snake_case__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_a , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_a )
snake_case__ = self.image_processing_class.from_pretrained(_a ).to_dict()
snake_case__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_a , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _a )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
pass
def SCREAMING_SNAKE_CASE ( ) -> Dict:
snake_case__ = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
snake_case__ = Image.open(dataset[4]['''file'''] )
snake_case__ = Image.open(dataset[5]['''file'''] )
snake_case__ = [imagea, imagea]
return images
@require_vision
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
snake_case__ = prepare_images()
# test non-batched
snake_case__ = image_processing(images[0] , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
snake_case__ = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() , _a )
# test batched
snake_case__ = image_processing(_a , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
snake_case__ = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , _a )
| 33 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = LayoutLMTokenizer
snake_case_ = LayoutLMTokenizerFast
snake_case_ = True
snake_case_ = True
def UpperCamelCase_ ( self : Any ):
super().setUp()
__A = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ ( self : Tuple ,**A : int ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**A )
def UpperCamelCase_ ( self : Optional[Any] ,A : Any ):
__A = "UNwant\u00E9d,running"
__A = "unwanted, running"
return input_text, output_text
def UpperCamelCase_ ( self : str ):
__A = self.tokenizer_class(self.vocab_file )
__A = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(A ,["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[7, 4, 5, 10, 8, 9] )
def UpperCamelCase_ ( self : int ):
pass
| 55 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE_ = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['MaskFormerFeatureExtractor']
SCREAMING_SNAKE_CASE_ = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
SCREAMING_SNAKE_CASE_ = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 34 |
SCREAMING_SNAKE_CASE :int = {str(digit): digit**5 for digit in range(10)}
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(a_ ) )
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(a_ ) )
if __name__ == "__main__":
print(solution())
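# Worked check: 4150 == 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0, so
# digits_fifth_powers_sum(4150) == 4150 and 4150 is counted by solution(); the
# range starts at 1000 to exclude the trivial 1 == 1**5, which Project Euler
# problem 30 does not count as a sum.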
| 55 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a ( ) -> Optional[Any]:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(A__ ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def a ( ) -> str:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def a ( ) -> List[str]:
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(A__ ):
http_head('''https://huggingface.co''' )
| 35 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
__A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A ,"tf_padding" ) )
self.parent.assertTrue(hasattr(A ,"depth_multiplier" ) )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] ,A : int ,A : List[Any]=13 ,A : int=3 ,A : Optional[Any]=32 ,A : Union[str, Any]=0.25 ,A : Tuple=8 ,A : Optional[int]=True ,A : Union[str, Any]=10_24 ,A : Any=32 ,A : Optional[int]="relu6" ,A : int=0.1 ,A : Optional[Any]=0.02 ,A : Optional[Any]=True ,A : List[str]=True ,A : str=10 ,A : str=None ,):
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = depth_multiplier
__A = min_depth
__A = tf_padding
__A = int(last_hidden_size * depth_multiplier )
__A = output_stride
__A = hidden_act
__A = classifier_dropout_prob
__A = use_labels
__A = is_training
__A = num_labels
__A = initializer_range
__A = scope
def UpperCamelCase_ ( self : Optional[int] ):
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.num_labels )
__A = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
__A = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self : Any ):
return MobileNetVaConfig(
num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,min_depth=self.min_depth ,tf_padding=self.tf_padding ,hidden_act=self.hidden_act ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self : Optional[int] ,A : str ,A : Tuple ,A : Optional[int] ,A : List[str] ):
__A = MobileNetVaModel(config=A )
model.to(A )
model.eval()
__A = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCamelCase_ ( self : List[Any] ,A : Union[str, Any] ,A : List[Any] ,A : int ,A : Union[str, Any] ):
__A = self.num_labels
__A = MobileNetVaForImageClassification(A )
model.to(A )
model.eval()
__A = model(A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Tuple ):
__A = self.prepare_config_and_inputs()
__A , __A , __A , __A = config_and_inputs
__A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
snake_case_ = (
{"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def UpperCamelCase_ ( self : Any ):
__A = MobileNetVaModelTester(self )
__A = MobileNetVaConfigTester(self ,config_class=A ,has_text_modality=A )
def UpperCamelCase_ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def UpperCamelCase_ ( self : Any ):
pass
def UpperCamelCase_ ( self : Optional[int] ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(A )
__A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,A )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Optional[int] ):
def check_hidden_states_output(A : List[Any] ,A : List[Any] ,A : Optional[int] ):
__A = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(A ,A ) )
__A = outputs.hidden_states
__A = 26
self.assertEqual(len(A ) ,A )
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = True
check_hidden_states_output(A ,A ,A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
check_hidden_states_output(A ,A ,A )
def UpperCamelCase_ ( self : Tuple ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def UpperCamelCase_ ( self : Union[str, Any] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = MobileNetVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase ( ) -> str:
"""simple docstring"""
__A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : List[str] ):
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
__A = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(A )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
__A = model(**A )
# verify the logits
__A = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape ,A )
__A = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
| 55 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=snake_case )
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : str = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
__lowerCamelCase : ClassVar[Features] = Features({'''image''': Image()} )
__lowerCamelCase : ClassVar[Features] = Features({'''labels''': ClassLabel} )
__lowerCamelCase : str = "image"
__lowerCamelCase : str = "labels"
    def align_with_features( self ,features ):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] ,ClassLabel ):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
    def column_mapping( self ):
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
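# A minimal usage sketch (assuming the ``datasets`` Features/ClassLabel API;
# label names here are hypothetical):
#
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = _A().align_with_features(features)
#   # task.label_schema["labels"] is now the dataset's own ClassLabel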
| 36 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : str ,A : int ,A : int=2 ,A : Optional[Any]=3 ,A : Dict=4 ,A : Optional[int]=2 ,A : Union[str, Any]=7 ,A : List[str]=True ,A : Union[str, Any]=True ,A : Optional[int]=True ,A : Optional[int]=True ,A : Tuple=99 ,A : Optional[int]=36 ,A : Dict=3 ,A : str=4 ,A : Optional[Any]=37 ,A : Dict="gelu" ,A : Dict=0.1 ,A : Union[str, Any]=0.1 ,A : Union[str, Any]=5_12 ,A : Any=16 ,A : Union[str, Any]=2 ,A : List[Any]=0.02 ,A : List[Any]=6 ,A : Optional[int]=6 ,A : List[Any]=3 ,A : Union[str, Any]=4 ,A : Tuple=None ,A : List[str]=10_00 ,):
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = patch_size
__A = text_seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = coordinate_size
__A = shape_size
__A = num_labels
__A = num_choices
__A = scope
__A = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__A = text_seq_length
__A = (image_size // patch_size) ** 2 + 1
__A = self.text_seq_length + self.image_seq_length
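        # e.g. with the defaults above (image_size=4, patch_size=2, text_seq_length=7):
        # image_seq_length = (4 // 2) ** 2 + 1 = 5, so seq_length = 7 + 5 = 12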
def UpperCamelCase_ ( self : int ):
__A = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
__A = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__A = bbox[i, j, 3]
__A = bbox[i, j, 1]
__A = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__A = bbox[i, j, 2]
__A = bbox[i, j, 0]
__A = t
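        # after the swaps, every box satisfies bbox[..., 0] <= bbox[..., 2] and bbox[..., 1] <= bbox[..., 3]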
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.text_seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
__A = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase_ ( self : Optional[int] ,A : List[str] ,A : Any ,A : Dict ,A : List[Any] ,A : Optional[int] ,A : Any ,A : Dict ,A : List[Any] ):
__A = LayoutLMvaModel(config=A )
model.to(A )
model.eval()
# text + image
__A = model(A ,pixel_values=A )
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A )
__A = model(A ,bbox=A ,pixel_values=A ,token_type_ids=A )
__A = model(A ,bbox=A ,pixel_values=A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
__A = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__A = model(pixel_values=A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[int] ,A : Dict ,A : List[str] ,A : Any ,A : List[Any] ,A : Any ,A : Any ,A : Dict ,A : Optional[Any] ):
__A = self.num_labels
__A = LayoutLMvaForSequenceClassification(A )
model.to(A )
model.eval()
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : str ,A : Optional[Any] ,A : Dict ,A : str ,A : Tuple ,A : Union[str, Any] ,A : List[Any] ,A : Any ,A : Union[str, Any] ):
__A = self.num_labels
__A = LayoutLMvaForTokenClassification(config=A )
model.to(A )
model.eval()
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Optional[int] ,A : Optional[Any] ,A : int ,A : str ,A : List[str] ,A : int ,A : List[str] ,A : List[str] ,A : Dict ):
__A = LayoutLMvaForQuestionAnswering(config=A )
model.to(A )
model.eval()
__A = model(
A ,bbox=A ,pixel_values=A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : str ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case_ = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : str ,A : Any ,A : Any ,A : Tuple ,A : List[Any] ,A : Optional[Any] ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = LayoutLMvaModelTester(self )
__A = ConfigTester(self ,config_class=A ,hidden_size=37 )
def UpperCamelCase_ ( self : List[Any] ,A : int ,A : List[str] ,A : Dict=False ):
__A = copy.deepcopy(A )
if model_class in get_values(A ):
__A = {
k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
if isinstance(A ,torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A ):
__A = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=A )
elif model_class in get_values(A ):
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
elif model_class in [
*get_values(A ),
]:
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
elif model_class in [
*get_values(A ),
]:
__A = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=A ,)
return inputs_dict
def UpperCamelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : str ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
def UpperCamelCase_ ( self : str ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : Any ):
return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None
@slow
    def UpperCamelCase_ ( self : Dict ):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image ,return_tensors="pt" ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) ,bbox=bbox.to(torch_device ) ,pixel_values=pixel_values.to(torch_device ) ,)
        # verify the logits
        expected_shape = torch.Size((1, 1_99, 7_68) )
        self.assertEqual(outputs.last_hidden_state.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,expected_slice ,atol=1E-4 ) )
| 55 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase : str = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
UpperCamelCase : Optional[int] = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class A__ ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = GPTaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop("add_bos_token" , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            # the serialized pre-tokenizer disagrees with the requested flag, so rebuild it
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation: "Conversation" ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            # keep only the most recent tokens once the conversation exceeds the context window
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
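# A minimal usage sketch (the class above corresponds to GPT2TokenizerFast;
# model files assumed available on the Hugging Face Hub):
#
#   tokenizer = A__.from_pretrained("gpt2")
#   ids = tokenizer("Hello world")["input_ids"]
#   assert tokenizer.decode(ids) == "Hello world"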
| 37 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any ,A : List[str] ,A : str=7 ,A : Optional[Any]=3 ,A : Any=18 ,A : int=30 ,A : int=4_00 ,A : List[str]=True ,A : Union[str, Any]=None ,A : Union[str, Any]=True ,A : Tuple=None ,A : Tuple=True ,A : Union[str, Any]=[0.5, 0.5, 0.5] ,A : str=[0.5, 0.5, 0.5] ,A : List[Any]=False ,):
__A = size if size is not None else {"height": 20, "width": 20}
__A = crop_size if crop_size is not None else {"height": 18, "width": 18}
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = min_resolution
__A = max_resolution
__A = do_resize
__A = size
__A = do_center_crop
__A = crop_size
__A = do_normalize
__A = image_mean
__A = image_std
__A = do_reduce_labels
def UpperCamelCase_ ( self : List[str] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    """simple docstring"""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image = Image.open(dataset[0]["file"] )
    map = Image.open(dataset[1]["file"] )
    return image, map
def prepare_semantic_batch_inputs():
    """simple docstring"""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
    image1 = Image.open(ds[0]["file"] )
    image2 = Image.open(ds[1]["file"] )
    map1 = Image.open(ds[2]["file"] )
    map2 = Image.open(ds[3]["file"] )
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
snake_case_ = BeitImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : List[Any] ):
__A = BeitImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : int ):
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A ,"do_resize" ) )
self.assertTrue(hasattr(A ,"size" ) )
self.assertTrue(hasattr(A ,"do_center_crop" ) )
self.assertTrue(hasattr(A ,"center_crop" ) )
self.assertTrue(hasattr(A ,"do_normalize" ) )
self.assertTrue(hasattr(A ,"image_mean" ) )
self.assertTrue(hasattr(A ,"image_std" ) )
def UpperCamelCase_ ( self : List[str] ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"height": 20, "width": 20} )
        self.assertEqual(image_processor.crop_size ,{"height": 18, "width": 18} )
        self.assertEqual(image_processor.do_reduce_labels ,False )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict ,size=42 ,crop_size=84 ,reduce_labels=True )
        self.assertEqual(image_processor.size ,{"height": 42, "width": 42} )
        self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
        self.assertEqual(image_processor.do_reduce_labels ,True )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : List[str] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
for image in image_inputs:
self.assertIsInstance(A ,np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : int ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : str ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
__A = []
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__A = image_processing(image_inputs[0] ,maps[0] ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test not batched input (PIL images)
__A , __A = prepare_semantic_single_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched input (PIL images)
__A , __A = prepare_semantic_batch_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
def UpperCamelCase_ ( self : Dict ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__A , __A = prepare_semantic_single_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 1_50 )
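        # with do_reduce_labels=True the background class (0) is remapped to 255 and every
        # other label is shifted down by one, hence the wider 0-255 range asserted below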
        image_processing.do_reduce_labels = True
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
| 55 | 0 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
__version__ = "3.0.12"
_logger = None
def logger() -> logging.Logger:
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout( TimeoutError ):
    '''simple docstring'''
    def __init__( self , lock_file ):
        self.lock_file = lock_file
        return None
    def __str__( self ):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy :
    '''simple docstring'''
    def __init__( self , lock ):
        self.lock = lock
        return None
    def __enter__( self ):
        return self.lock
    def __exit__( self , exc_type , exc_value , traceback ):
        self.lock.release()
        return None
class BaseFileLock :
    '''simple docstring'''
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = max_filename_length if max_filename_length is not None else 2_5_5
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file( self ):
        return self._lock_file
    @property
    def timeout( self ):
        return self._timeout
    @timeout.setter
    def timeout( self , value ):
        self._timeout = float(value )
        return None
    def _acquire( self ):
        raise NotImplementedError()
    def _release( self ):
        raise NotImplementedError()
    @property
    def is_locked( self ):
        return self._lock_file_fd is not None
    def acquire( self , timeout=None , poll_intervall=0.05 ):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(f"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
                        time.sleep(poll_intervall )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
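    # The proxy return value supports both usage styles (sketch, not part of the API):
    #
    #   with lock.acquire(timeout=10):
    #       ...
    #
    #   lock.acquire()
    #   try:
    #       ...
    #   finally:
    #       lock.release()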
    def release( self , force=False ):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}" )
        return None
def __enter__( self ):
self.acquire()
return self
    def __exit__( self , exc_type , exc_value , traceback ):
self.release()
return None
def __del__( self ):
        self.release(force=True )
return None
    def hash_filename_if_too_long( self , path , max_length ):
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            filename = filename[: max_length - len(hashed_filename ) - 8] + """...""" + hashed_filename + """.lock"""
            return os.path.join(dirname , filename )
        else:
            return path
class WindowsFileLock( BaseFileLock ):
    '''simple docstring'''
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release( self ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock( BaseFileLock ):
    '''simple docstring'''
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock( BaseFileLock ):
    '''simple docstring'''
    def _acquire( self ):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ):
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
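# A minimal usage sketch (hypothetical file names; FileLock resolves to the
# platform-appropriate class chosen above):
#
#   lock = FileLock("shared_resource.txt.lock")
#   with lock:
#       ...  # exclusive section; the lock is released on exit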
| 38 |
from numpy import exp, pi, sqrt
def UpperCAmelCase ( x , mu = 0.0 , sigma = 1.0 ) -> float:
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
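# For example, with mu = 0 and sigma = 1 the standard normal density at x = 0 is
# 1 / sqrt(2 * pi) ≈ 0.3989422804014327.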
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image ):
    warnings.warn(
        '''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        # scale from [0, 1] to [-1, 1], the range the UNet works in
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask(mask ):
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        # binarize: values below 0.5 become 0, everything else becomes 1
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class RePaintPipeline( DiffusionPipeline ):
    '''simple docstring'''
    unet: UNetaDModel
    scheduler: RePaintScheduler
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] , mask_image: Union[torch.Tensor, PIL.Image.Image] , num_inference_steps: int = 2_5_0 , eta: float = 0.0 , jump_length: int = 1_0 , jump_n_sample: int = 1_0 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
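# A minimal usage sketch (checkpoint name assumed; RePaint repeatedly jumps back
# jump_length steps and re-denoises, jump_n_sample times, to harmonize the known
# and inpainted regions):
#
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#   out = pipe(image=init_image, mask_image=mask, num_inference_steps=250)
#   out.images[0].save("inpainted.png")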
| 39 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def UpperCamelCase_ ( self : Optional[int] ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline , params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id ,safety_checker=None )
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids , processed_masked_images , processed_masks = pipeline.prepare_inputs(prompt ,init_image ,mask_image )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed ,jax.device_count() )
        prompt_ids = shard(prompt_ids )
        processed_masked_images = shard(processed_masked_images )
        processed_masks = shard(processed_masks )
        output = pipeline(
            prompt_ids ,processed_masks ,processed_masked_images ,params ,prng_seed ,num_inference_steps ,jit=True )
        images = output.images.reshape(num_samples ,5_12 ,5_12 ,3 )
        image_slice = images[0, 2_53:2_56, 2_53:2_56, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
        print(f'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
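    # Note: replicate() copies the pipeline params to every local device and
    # shard() splits each input along the device axis, which is what the
    # jit=True (pmap) execution path above expects.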
| 55 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=30, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=2, ) -> Any:
UpperCamelCase : Dict = parent
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : Tuple = image_size
UpperCamelCase : List[Any] = patch_size
UpperCamelCase : Union[str, Any] = num_channels
UpperCamelCase : Union[str, Any] = is_training
UpperCamelCase : Any = use_labels
UpperCamelCase : Union[str, Any] = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : Optional[Any] = intermediate_size
UpperCamelCase : str = hidden_act
UpperCamelCase : Tuple = hidden_dropout_prob
UpperCamelCase : int = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = type_sequence_label_size
UpperCamelCase : Optional[int] = initializer_range
UpperCamelCase : Optional[Any] = scope
UpperCamelCase : List[Any] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCamelCase : Union[str, Any] = (image_size // patch_size) ** 2
UpperCamelCase : str = num_patches + 2
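        # e.g. with the defaults above (image_size=30, patch_size=2):
        # (30 // 2) ** 2 = 225 patches, so the sequence length is 225 + 2 = 227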
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : str = None
if self.use_labels:
UpperCamelCase : Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ) -> Optional[int]:
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=SCREAMING_SNAKE_CASE_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : int = DeiTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Any = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : Optional[int] = DeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase : Union[str, Any] = 1
UpperCamelCase : Tuple = DeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase : int = self.type_sequence_label_size
UpperCamelCase : Dict = DeiTForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Dict = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase : str = 1
UpperCamelCase : List[Any] = DeiTForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def snake_case_ ( self ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Optional[int] = False
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : str = DeiTModelTester(self )
UpperCamelCase : str = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_, hidden_size=37 )
def snake_case_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def snake_case_ ( self ) -> str:
pass
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCamelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_, nn.Linear ) )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : List[str] = [*signature.parameters.keys()]
UpperCamelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> str:
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Dict:
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Dict:
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]:
UpperCamelCase : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
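                # the distilled ("with teacher") head is inference-only, so it takes no labels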
del inputs_dict["labels"]
return inputs_dict
def snake_case_ ( self ) -> str:
if not self.model_tester.is_training:
return
UpperCamelCase , UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : int = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def snake_case_ ( self ) -> Dict:
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase : Tuple = False
UpperCamelCase : int = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCamelCase : str = model_class(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : List[Any] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE_ ),
*get_values(SCREAMING_SNAKE_CASE_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
UpperCamelCase : Dict = problem_type['title']
UpperCamelCase : Optional[int] = problem_type['num_labels']
UpperCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ )
if problem_type["num_labels"] > 1:
UpperCamelCase : int = inputs['labels'].unsqueeze(1 ).repeat(1, problem_type['num_labels'] )
UpperCamelCase : Tuple = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE_ ) as warning_list:
UpperCamelCase : List[Any] = model(**SCREAMING_SNAKE_CASE_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def snake_case_ ( self ) -> Union[str, Any]:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> List[str]:
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
    def snake_case_ ( self ) -> Union[str, Any]:
        model = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
    def snake_case_ ( self ) -> str:
        model = DeiTModel.from_pretrained(
            'facebook/deit-base-distilled-patch16-224', torch_dtype=torch.float16, device_map='auto' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 40 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any ,A : Optional[int] ,A : Optional[int]=7 ,A : Optional[Any]=3 ,A : List[str]=18 ,A : Any=30 ,A : Tuple=4_00 ,A : Union[str, Any]=True ,A : Optional[Any]=32 ,A : Union[str, Any]=True ,):
__A = parent
__A = batch_size
__A = num_channels
__A = image_size
__A = min_resolution
__A = max_resolution
__A = do_resize
__A = size_divisor
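        # images are resized so that height and width are (rounded-down) multiples of size_divisor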
__A = do_rescale
def UpperCamelCase_ ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = GLPNImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : int ):
__A = GLPNImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Any ):
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A ,"do_resize" ) )
self.assertTrue(hasattr(A ,"size_divisor" ) )
self.assertTrue(hasattr(A ,"resample" ) )
self.assertTrue(hasattr(A ,"do_rescale" ) )
def UpperCamelCase_ ( self : str ):
pass
def UpperCamelCase_ ( self : Dict ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def UpperCamelCase_ ( self : Optional[Any] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
for image in image_inputs:
self.assertIsInstance(A ,np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def UpperCamelCase_ ( self : int ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
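# Note on the shape assertions above: GLPNImageProcessor is expected to round
# height and width down to the nearest multiple of `size_divisor` (32 here).
# A minimal sketch of that rounding, under that assumption (this mirrors the
# checks; it is not the processor's own code):
def _round_down_to_multiple(x: int, divisor: int = 32) -> int:
    return (x // divisor) * divisor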
| 55 | 0 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowerCAmelCase__ = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
lowerCAmelCase__ = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
lowerCAmelCase__ = '''
Calculates how good the predictions are given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds allowed for each candidate program to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
lowerCAmelCase__ = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
lowerCAmelCase__ = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) ,homepage='''https://github.com/openai/human-eval''' ,codebase_urls=['''https://github.com/openai/human-eval'''] ,reference_urls=['''https://github.com/openai/human-eval'''] ,license=_LICENSE ,)
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : int=[1, 1_0, 1_0_0] ,lowercase__ : Any=4 ,lowercase__ : Union[str, Any]=3.0 ):
if os.getenv('''HF_ALLOW_CODE_EVAL''' ,0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('''This metric is currently not supported on Windows.''' )
with ThreadPoolExecutor(max_workers=lowercase__ ) as executor:
__lowercase = []
__lowercase = Counter()
__lowercase = 0
__lowercase = defaultdict(lowercase__ )
for task_id, (candidates, test_case) in enumerate(zip(lowercase__ ,lowercase__ ) ):
for candidate in candidates:
__lowercase = candidate + '''\n''' + test_case
__lowercase = (test_program, timeout, task_id, completion_id[task_id])
__lowercase = executor.submit(lowercase__ ,*lowercase__ )
futures.append(lowercase__ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(lowercase__ ):
__lowercase = future.result()
results[result["task_id"]].append((result['''completion_id'''], result) )
__lowercase , __lowercase = [], []
for result in results.values():
result.sort()
__lowercase = [r[1]['''passed'''] for r in result]
total.append(len(lowercase__ ) )
correct.append(sum(lowercase__ ) )
__lowercase = np.array(lowercase__ )
__lowercase = np.array(lowercase__ )
__lowercase = k
__lowercase = {F"pass@{k}": estimate_pass_at_k(lowercase__ ,lowercase__ ,lowercase__ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
def estimator(A__ , A__ , A__ ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(A__ , A__ ):
__lowercase = itertools.repeat(A__ , len(A__ ) )
else:
assert len(A__ ) == len(A__ )
__lowercase = iter(A__ )
return np.array([estimator(int(A__ ) , int(A__ ) , A__ ) for n, c in zip(A__ , A__ )] )
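# Illustrative reference for the estimator above (hypothetical helper, not
# part of the metric): for n samples with c correct, pass@k has the closed
# form 1 - C(n-c, k) / C(n, k); the running product in `estimator` computes
# the same value without evaluating large binomial coefficients.
def _pass_at_k_reference(n: int, c: int, k: int) -> float:
    from math import comb
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)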
| 41 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case_ = Features({"image": Image()} )
snake_case_ = Features({"labels": ClassLabel} )
snake_case_ = "image"
snake_case_ = "labels"
def UpperCamelCase_ ( self : Optional[Any] ,A : Tuple ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] ,A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
__A = copy.deepcopy(self )
__A = self.label_schema.copy()
__A = features[self.label_column]
__A = label_schema
return task_template
@property
def UpperCamelCase_ ( self : Any ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 55 | 0 |
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
lowerCamelCase_ = ''
for i in table:
res += inp[i - 1]
return res
def _UpperCamelCase ( __UpperCamelCase ) -> Tuple:
return data[1:] + data[0]
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
lowerCamelCase_ = ''
for i in range(len(__UpperCamelCase ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
lowerCamelCase_ = int('0b' + data[0] + data[-1] ,2 )
lowerCamelCase_ = int('0b' + data[1:3] ,2 )
return bin(s[row][col] )[2:]
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int:
lowerCamelCase_ = message[:4]
lowerCamelCase_ = message[4:]
lowerCamelCase_ = apply_table(__UpperCamelCase ,__UpperCamelCase )
lowerCamelCase_ = xor(__UpperCamelCase ,__UpperCamelCase )
lowerCamelCase_ = apply_sbox(__UpperCamelCase ,temp[:4] ) # noqa: E741
lowerCamelCase_ = apply_sbox(__UpperCamelCase ,temp[4:] )
lowerCamelCase_ = '0' * (2 - len(__UpperCamelCase )) + l # noqa: E741
lowerCamelCase_ = '0' * (2 - len(__UpperCamelCase )) + r
lowerCamelCase_ = apply_table(l + r ,__UpperCamelCase )
lowerCamelCase_ = xor(__UpperCamelCase ,__UpperCamelCase )
return temp + right
if __name__ == "__main__":
A_ = input("Enter 10 bit key: ")
A_ = input("Enter 8 bit message: ")
A_ = [6, 3, 7, 4, 8, 5, 10, 9]
A_ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
A_ = [2, 4, 3, 1]
A_ = [2, 6, 3, 1, 4, 8, 5, 7]
A_ = [4, 1, 3, 5, 7, 2, 8, 6]
A_ = [4, 1, 2, 3, 2, 3, 4, 1]
A_ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
A_ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
A_ = apply_table(key, paa_table)
A_ = temp[:5]
A_ = temp[5:]
A_ = left_shift(left)
A_ = left_shift(right)
A_ = apply_table(left + right, pa_table)
A_ = left_shift(left)
A_ = left_shift(right)
A_ = left_shift(left)
A_ = left_shift(right)
A_ = apply_table(left + right, pa_table)
# encryption
A_ = apply_table(message, IP)
A_ = function(expansion, sa, sa, keya, temp)
A_ = temp[4:] + temp[:4]
A_ = function(expansion, sa, sa, keya, temp)
A_ = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
A_ = apply_table(CT, IP)
A_ = function(expansion, sa, sa, keya, temp)
A_ = temp[4:] + temp[:4]
A_ = function(expansion, sa, sa, keya, temp)
A_ = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 42 |
from math import sqrt
def UpperCAmelCase ( a_ ) -> bool:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (
number >= 0
), "'number' must been an int and positive"
__A = True
    # 0 and 1 are not primes.
if number <= 1:
__A = False
for divisor in range(2 , int(round(sqrt(a_ ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
__A = False
break
# precondition
assert isinstance(a_ , a_ ), "'status' must been from type bool"
return status
def UpperCAmelCase ( a_ ) -> Any:
"""simple docstring"""
    assert isinstance(a_ , a_ ) and (n > 2), "'N' must be an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__A = list(range(2 , n + 1 ) )
    __A = [] # this list will be returned.
    # actual sieve of eratosthenes
for i in range(len(a_ ) ):
for j in range(i + 1 , len(a_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__A = 0
# filters actual prime numbers.
__A = [x for x in begin_list if x != 0]
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
    assert isinstance(a_ , a_ ) and (n > 2), "'N' must be an int and > 2"
__A = []
    # iterates over all numbers from 2 up to N+1
    # if a number is prime then appends it to the list 'ans'
for number in range(2 , n + 1 ):
if is_prime(a_ ):
ans.append(a_ )
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
    assert isinstance(a_ , a_ ) and number >= 0, "'number' must be an int and >= 0"
    __A = [] # this list will be returned by the function.
# potential prime number factors.
__A = 2
__A = number
if number == 0 or number == 1:
ans.append(a_ )
    # if 'number' is not prime then build the prime factorization of 'number'
elif not is_prime(a_ ):
while quotient != 1:
if is_prime(a_ ) and (quotient % factor == 0):
ans.append(a_ )
quotient /= factor
else:
factor += 1
else:
ans.append(a_ )
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase ( a_ ) -> Any:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__A = 0
# prime factorization of 'number'
__A = prime_factorization(a_ )
__A = max(a_ )
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type int"
return ans
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__A = 0
# prime factorization of 'number'
__A = prime_factorization(a_ )
__A = min(a_ )
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type int"
return ans
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
assert isinstance(a_ , a_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , a_ ), "compare bust been from type bool"
return number % 2 == 0
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
assert isinstance(a_ , a_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , a_ ), "compare bust been from type bool"
return number % 2 != 0
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(a_ , a_ ) and (number > 2) and is_even(a_ )
), "'number' must been an int, even and > 2"
__A = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__A = get_prime_numbers(a_ )
__A = len(a_ )
# run variable for while-loops.
__A = 0
__A = None
    # exit variable, for breaking out of the loops
__A = True
while i < len_pn and loop:
__A = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__A = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(a_ , a_ )
and (len(a_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def UpperCAmelCase ( a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(a_ , a_ )
and isinstance(a_ , a_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__A = 0
while numbera != 0:
__A = numbera % numbera
__A = numbera
__A = rest
# precondition
assert isinstance(a_ , a_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
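# Reference sketch (hypothetical helper) of the Euclidean algorithm the loop
# above is meant to implement: repeatedly replace (a, b) with (b, a % b)
# until b reaches 0.
def _gcd_reference(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a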
def UpperCAmelCase ( a_ , a_ ) -> List[str]:
"""simple docstring"""
assert (
isinstance(a_ , a_ )
and isinstance(a_ , a_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__A = 1 # actual answer that will be return.
    # for kgV(x, 1)  (kgV = least common multiple)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__A = prime_factorization(a_ )
__A = prime_factorization(a_ )
elif numbera == 1 or numbera == 1:
__A = []
__A = []
__A = max(a_ , a_ )
__A = 0
__A = 0
    __A = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__A = prime_fac_a.count(a_ )
__A = prime_fac_a.count(a_ )
for _ in range(max(a_ , a_ ) ):
ans *= n
else:
__A = prime_fac_a.count(a_ )
for _ in range(a_ ):
ans *= n
done.append(a_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__A = prime_fac_a.count(a_ )
for _ in range(a_ ):
ans *= n
done.append(a_ )
# precondition
assert isinstance(a_ , a_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
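# Reference sketch (hypothetical helper) for the least common multiple built
# above from prime factorizations: for positive a and b,
# lcm(a, b) = a * b // gcd(a, b).
def _lcm_reference(a: int, b: int) -> int:
    from math import gcd
    return a * b // gcd(a, b)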
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n >= 0), "'number' must been a positive int"
__A = 0
__A = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(a_ ):
ans += 1
# precondition
assert isinstance(a_ , a_ ) and is_prime(
a_ ), "'ans' must been a prime number and from type int"
return ans
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
assert (
is_prime(a_ ) and is_prime(a_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__A = p_number_a + 1 # jump to the next number
    __A = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(a_ ):
number += 1
while number < p_number_a:
ans.append(a_ )
number += 1
# fetch the next prime number.
while not is_prime(a_ ):
number += 1
# precondition
assert (
isinstance(a_ , a_ )
and ans[0] != p_number_a
and ans[len(a_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n >= 1), "'n' must been int and >= 1"
__A = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(a_ )
# precondition
    assert ans[0] == 1 and ans[len(a_ ) - 1] == n, "Error in function get_divisors(...)"
return ans
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (
number > 1
), "'number' must been an int and >= 1"
__A = get_divisors(a_ )
# precondition
assert (
isinstance(a_ , a_ )
and (divisors[0] == 1)
and (divisors[len(a_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def UpperCAmelCase ( a_ , a_ ) -> str:
"""simple docstring"""
assert (
isinstance(a_ , a_ )
and isinstance(a_ , a_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__A = gcd(abs(a_ ) , abs(a_ ) )
# precondition
assert (
isinstance(a_ , a_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n >= 0), "'n' must been a int and >= 0"
__A = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n >= 0), "'n' must been an int and >= 0"
__A = 0
__A = 1
    __A = 1 # this will be returned
for _ in range(n - 1 ):
__A = ans
ans += fiba
__A = tmp
return ans
| 55 | 0 |
from math import pi, sqrt, tan
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
lowercase__ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(SCREAMING_SNAKE_CASE , 2 ) * torus_radius * tube_radius
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
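    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)),
    # where s is the semi-perimeter computed below.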
lowercase__ = (sidea + sidea + sidea) / 2
lowercase__ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 43 |
import os
def UpperCAmelCase ( ) -> Any:
"""simple docstring"""
__A = os.path.dirname(os.path.realpath(a_ ) )
__A = os.path.join(a_ , "triangle.txt" )
with open(a_ ) as f:
__A = f.readlines()
__A = []
for line in triangle:
__A = []
for number in line.strip().split(" " ):
numbers_from_line.append(int(a_ ) )
a.append(a_ )
for i in range(1 , len(a_ ) ):
for j in range(len(a[i] ) ):
__A = a[i - 1][j] if j != len(a[i - 1] ) else 0
__A = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(a_ , a_ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
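# Sketch of the recurrence the loop above implements (illustrative): after the
# bottom-up pass, a[i][j] holds the best path sum ending at row i, column j:
#     a[i][j] += max(a[i-1][j-1] if j > 0 else 0,
#                    a[i-1][j]   if j < len(a[i-1]) else 0)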
| 55 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
UpperCAmelCase_ : List[str] = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : Any = {
'RUCAIBox/mvp': 1024,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = MvpTokenizer
def __init__( self : Any,__A : Tuple=None,__A : str=None,__A : List[Any]=None,__A : Union[str, Any]="replace",__A : Union[str, Any]="<s>",__A : Optional[int]="</s>",__A : List[str]="</s>",__A : Any="<s>",__A : Dict="<unk>",__A : Union[str, Any]="<pad>",__A : Optional[int]="<mask>",__A : List[str]=False,__A : str=True,**__A : str,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : str = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : Optional[int] = add_prefix_space
_lowerCamelCase : List[str] = pre_tok_class(**__A )
_lowerCamelCase : int = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[int] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : Optional[int] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : List[str] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : str = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Tuple = True
if changes_to_apply:
_lowerCamelCase : Dict = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Dict = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : List[str],__A : List[str] ):
_lowerCamelCase : Optional[int] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : Optional[Any] = value
def lowerCamelCase_ ( self : List[Any],*__A : Dict,**__A : int ):
_lowerCamelCase : Any = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Union[str, Any],*__A : Optional[int],**__A : Dict ):
_lowerCamelCase : Any = kwargs.get("is_split_into_words",__A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Dict,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : List[str],__A : int,__A : Optional[Any]=None ):
_lowerCamelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
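    # Special-token layout built above (BART-style, as read off the code):
    # single sequence -> <s> A </s>; pair -> <s> A </s> </s> B </s>.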
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Tuple = [self.sep_token_id]
_lowerCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 44 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
SCREAMING_SNAKE_CASE :Union[str, Any] = object()
# For specifying empty leaf dict `{}`
SCREAMING_SNAKE_CASE :List[str] = object()
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
__A = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(a_ ) - len(a_ ) + 1 ):
__A = [x.match(a_ ) for x, y in zip(a_ , ks[i:] )]
if matches and all(a_ ):
return True
return False
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
def replace(a_ , a_ ):
for rule, replacement in rules:
if _match(a_ , a_ ):
return replacement
return val
return replace
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , a_ )),
(("transformer", "wte", "embedding"), P("mp" , a_ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(a_ , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , a_ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(a_ , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , a_ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
__A = _get_partition_rules()
__A = _replacement_rules(a_ )
__A = {k: _unmatched for k in flatten_dict(a_ )}
__A = {k: replace(a_ , a_ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(a_ ) )
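# Illustrative note (hypothetical key, not executable here): the function above
# slides each regex tuple over a flattened parameter key such as
#     ("transformer", "h", "0", "attention", "q_proj", "kernel")
# and applies the first rule whose patterns match a contiguous window of the
# key, e.g. sharding the q_proj kernel over the model-parallel "mp" axis.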
| 55 | 0 |
from scipy.stats import spearmanr
import datasets
UpperCamelCase = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
UpperCamelCase = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
UpperCamelCase = r"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __a ( self :List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
def __a ( self :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=False ):
UpperCamelCase__ :Any = spearmanr(lowerCamelCase__ , lowerCamelCase__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 45 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] ,A : Union[str, Any] ,A : List[Any]=13 ,A : Optional[Any]=30 ,A : Union[str, Any]=2 ,A : Union[str, Any]=3 ,A : Any=True ,A : Dict=True ,A : str=32 ,A : Tuple=2 ,A : Optional[int]=4 ,A : Tuple=37 ,A : List[Any]="gelu" ,A : Dict=0.1 ,A : Optional[int]=0.1 ,A : List[Any]=10 ,A : Optional[Any]=0.02 ,A : Dict=3 ,A : Dict=None ,A : List[Any]=2 ,):
__A = parent
__A = batch_size
__A = image_size
__A = patch_size
__A = num_channels
__A = is_training
__A = use_labels
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = type_sequence_label_size
__A = initializer_range
__A = scope
__A = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__A = (image_size // patch_size) ** 2
__A = num_patches + 2
def UpperCamelCase_ ( self : List[Any] ):
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__A = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : Optional[int] ):
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,A : Optional[int] ,A : Union[str, Any] ):
__A = TFDeiTModel(config=A )
__A = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : List[Any] ,A : List[Any] ,A : Optional[Any] ,A : Dict ):
__A = TFDeiTForMaskedImageModeling(config=A )
__A = model(A )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__A = 1
__A = TFDeiTForMaskedImageModeling(A )
__A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A = model(A )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase_ ( self : Optional[Any] ,A : Union[str, Any] ,A : Dict ,A : Union[str, Any] ):
__A = self.type_sequence_label_size
__A = TFDeiTForImageClassification(A )
__A = model(A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__A = 1
__A = TFDeiTForImageClassification(A )
__A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A = model(A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : str ):
__A = self.prepare_config_and_inputs()
__A , __A , __A = config_and_inputs
__A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def UpperCamelCase_ ( self : str ):
__A = TFDeiTModelTester(self )
__A = ConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 )
def UpperCamelCase_ ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
def UpperCamelCase_ ( self : List[Any] ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(A )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
__A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A ,tf.keras.layers.Dense ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(A )
__A = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,A )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self : Optional[int] ,A : Union[str, Any] ,A : List[str] ,A : Optional[Any]=False ):
__A = super()._prepare_for_class(A ,A ,return_labels=A )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def UpperCamelCase_ ( self : Any ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = TFDeiTModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase ( ) -> str:
"""simple docstring"""
__A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : int ):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : Optional[int] ):
__A = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="tf" )
# forward pass
__A = model(**A )
# verify the logits
__A = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape ,A )
__A = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
| 55 | 0 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
_lowerCAmelCase : List[str] = 300 # TEMPERATURE (unit = K)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
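# Illustrative check with typical silicon values (assumed, not from the
# source): donor_conc = acceptor_conc = 1e17 cm^-3 and
# intrinsic_conc = 1.5e10 cm^-3 give roughly
# (kT/q) * ln(Nd * Na / ni**2) ~= 0.026 V * 31.4 ~= 0.81 V at 300 K.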
if __name__ == "__main__":
import doctest
doctest.testmod()
| 46 |
SCREAMING_SNAKE_CASE :List[Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE :Union[str, Any] = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE :int = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def UpperCAmelCase ( a_ , a_ , a_ ) -> str:
"""simple docstring"""
assert len(str(a_ ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 1_2, "month should be between 1 and 12"
    assert 1 <= day <= 3_1, "day should be between 1 and 31"
# Doomsday algorithm:
__A = year // 1_0_0
__A = (5 * (century % 4) + 2) % 7
__A = year % 1_0_0
__A = centurian % 1_2
__A = (
(centurian // 1_2) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
__A = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_0_0) != 0)  # not a leap year
else DOOMSDAY_LEAP[month - 1]
)
__A = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
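# Illustrative expectations for the doomsday computation above (weekdays
# verified against the Gregorian calendar): 2000-01-01 -> 'Saturday',
# 2023-06-15 -> 'Thursday'.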
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55 | 0 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase( __lowerCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorPipeline
__SCREAMING_SNAKE_CASE : int = ['''prompt''']
__SCREAMING_SNAKE_CASE : Tuple = ['''prompt''', '''negative_prompt''']
__SCREAMING_SNAKE_CASE : Any = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
__SCREAMING_SNAKE_CASE : Tuple = False
@property
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return 3_2
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return 3_2
@property
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return self.time_input_dim
@property
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return 1_0_0
@property
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE__ )
@property
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Union[str, Any] = {
'num_attention_heads': 2,
'attention_head_dim': 1_2,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
__a : Optional[Any] = PriorTransformer(**SCREAMING_SNAKE_CASE__ )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
__a : Tuple = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
__a : int = CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE__ )
return model
@property
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
__a : Optional[Any] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_2_4 , )
return image_processor
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Union[str, Any] = self.dummy_prior
__a : int = self.dummy_image_encoder
__a : Optional[Any] = self.dummy_text_encoder
__a : Tuple = self.dummy_tokenizer
__a : Dict = self.dummy_image_processor
__a : Tuple = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=10.0 , )
__a : Any = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any=0 ):
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
__a : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
__a : str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__a : Tuple = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
__a : Union[str, Any] = 'cpu'
__a : str = self.get_dummy_components()
__a : Optional[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__a : List[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
__a : List[Any] = output.image_embeds
__a : Optional[int] = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
__a : List[Any] = image[0, -1_0:]
__a : Union[str, Any] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
__a : Optional[int] = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a : str = torch_device == 'cpu'
__a : Tuple = True
__a : Optional[Any] = False
self._test_inference_batch_single_identical(
test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , )
@skip_mps
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : Optional[int] = torch_device == 'cpu'
__a : Any = False
self._test_attention_slicing_forward_pass(
test_max_difference=SCREAMING_SNAKE_CASE__ , test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , )
| 47 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCAmelCase ( a_ = "isbn/0140328726" ) -> dict:
"""simple docstring"""
__A = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
__A = F'''{olid} is not a valid Open Library olid'''
raise ValueError(a_ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def UpperCAmelCase ( a_ ) -> dict:
"""simple docstring"""
__A = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
__A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__A = [
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
__A = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(a_ , a_ ):
__A = ", ".join(a_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
SCREAMING_SNAKE_CASE :int = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'''Sorry, {isbn} is not a valid ISBN. Please input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
SCREAMING_SNAKE_CASE :Any = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
        except JSONDecodeError:  # Raised when the API response is not valid JSON, e.g. the ISBN is unknown
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 55 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Optional[int] = ['image_processor', 'tokenizer']
snake_case__ :List[str] = 'BlipImageProcessor'
snake_case__ :Any = 'AutoTokenizer'
def __init__( self : Any , __magic_name__ : str , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = False
super().__init__(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = self.image_processor
def __call__( self : Any , __magic_name__ : ImageInput = None , __magic_name__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __magic_name__ : bool = True , __magic_name__ : Union[bool, str, PaddingStrategy] = False , __magic_name__ : Union[bool, str, TruncationStrategy] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = 0 , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : str , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
lowerCAmelCase__ = self.tokenizer
lowerCAmelCase__ = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_token_type_ids=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
return text_encoding
# add pixel_values
lowerCAmelCase__ = self.image_processor(__magic_name__ , return_tensors=__magic_name__ )
if text is not None:
lowerCAmelCase__ = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_token_type_ids=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
else:
lowerCAmelCase__ = None
if text_encoding is not None:
encoding_image_processor.update(__magic_name__ )
return encoding_image_processor
def __SCREAMING_SNAKE_CASE ( self : List[str] , *__magic_name__ : Optional[int] , **__magic_name__ : Optional[int] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , *__magic_name__ : Any , **__magic_name__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer.model_input_names
lowerCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 48 |
import requests
SCREAMING_SNAKE_CASE :List[str] = 'YOUR API KEY'
def UpperCAmelCase ( a_ , a_ = giphy_api_key ) -> list:
"""simple docstring"""
__A = "+".join(query.split() )
__A = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
__A = requests.get(a_ ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 55 | 0 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def lowercase__ ( snake_case_ :Dict ):
__UpperCAmelCase = {}
__UpperCAmelCase = job['''started_at''']
__UpperCAmelCase = job['''completed_at''']
__UpperCAmelCase = date_parser.parse(snake_case_ )
__UpperCAmelCase = date_parser.parse(snake_case_ )
__UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
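    # e.g. a job that ran from 10:00:05 to 10:12:47 lasted 762 seconds, i.e. round(12.7) = 13 minutes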
__UpperCAmelCase = start
__UpperCAmelCase = end
__UpperCAmelCase = duration_in_min
return job_info
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any]=None ):
__UpperCAmelCase = None
if token is not None:
__UpperCAmelCase = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''}
__UpperCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__UpperCAmelCase = requests.get(snake_case_ , headers=snake_case_ ).json()
__UpperCAmelCase = {}
try:
job_time.update({job['''name''']: extract_time_from_single_job(snake_case_ ) for job in result['''jobs''']} )
__UpperCAmelCase = math.ceil((result['''total_count'''] - 100) / 100 )
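        # e.g. with 250 jobs in total, ceil((250 - 100) / 100) = 2 extra pages are fetched
        # on top of the first page of 100 results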
for i in range(snake_case_ ):
__UpperCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=snake_case_ ).json()
job_time.update({job['''name''']: extract_time_from_single_job(snake_case_ ) for job in result['''jobs''']} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
_lowercase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
_lowercase : Tuple = parser.parse_args()
_lowercase : List[str] = get_job_time(args.workflow_run_id)
_lowercase : List[Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
| 49 |
import itertools
import math
def UpperCAmelCase ( a_ ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
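    # e.g. 5 and 7 (k=1), 11 and 13 (k=2); stepping by 6 and testing both i and i + 2
    # below therefore covers every candidate prime factor up to sqrt(number)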
for i in range(5 , int(math.sqrt(a_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
__A = 2
while True:
if is_prime(a_ ):
yield num
num += 1
def UpperCAmelCase ( a_ = 1_0_0_0_1 ) -> int:
"""simple docstring"""
return next(itertools.islice(prime_generator() , nth - 1 , a_ ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 55 | 0 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase__ (enum.Enum ):
'''simple docstring'''
_UpperCamelCase = 0
_UpperCamelCase = 1
@add_end_docstrings(a )
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'generated'
def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
super().__init__(*_lowerCAmelCase ,**_lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCamelCase_ ( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
lowerCamelCase__ = {}
if truncation is not None:
lowerCamelCase__ = truncation
lowerCamelCase__ = generate_kwargs
lowerCamelCase__ = {}
if return_tensors is not None and return_type is None:
lowerCamelCase__ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowerCamelCase__ = return_type
if clean_up_tokenization_spaces is not None:
lowerCamelCase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCamelCase__ = self.tokenizer.encode(_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase )
if len(_lowerCAmelCase ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
lowerCamelCase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
return True
def UpperCamelCase_ ( self ,*_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] ,_lowerCAmelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
lowerCamelCase__ = ([prefix + arg for arg in args[0]],)
lowerCamelCase__ = True
elif isinstance(args[0] ,_lowerCAmelCase ):
lowerCamelCase__ = (prefix + args[0],)
lowerCamelCase__ = False
else:
raise ValueError(
F''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
lowerCamelCase__ = self.tokenizer(*_lowerCAmelCase ,padding=_lowerCAmelCase ,truncation=_lowerCAmelCase ,return_tensors=self.framework )
        # `token_type_ids` is produced by some tokenizers but is not a valid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
lowerCamelCase__ = super().__call__(*_lowerCAmelCase ,**_lowerCAmelCase )
if (
isinstance(args[0] ,_lowerCAmelCase )
and all(isinstance(_lowerCAmelCase ,_lowerCAmelCase ) for el in args[0] )
and all(len(_lowerCAmelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE ,**_lowerCAmelCase ):
lowerCamelCase__ = self._parse_and_tokenize(_lowerCAmelCase ,truncation=_lowerCAmelCase ,**_lowerCAmelCase )
return inputs
def UpperCamelCase_ ( self ,_lowerCAmelCase ,**_lowerCAmelCase ):
if self.framework == "pt":
lowerCamelCase__ , lowerCamelCase__ = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
lowerCamelCase__ , lowerCamelCase__ = tf.shape(model_inputs["""input_ids"""] ).numpy()
lowerCamelCase__ = generate_kwargs.get("""min_length""" ,self.model.config.min_length )
lowerCamelCase__ = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
self.check_inputs(_lowerCAmelCase ,generate_kwargs["""min_length"""] ,generate_kwargs["""max_length"""] )
lowerCamelCase__ = self.model.generate(**_lowerCAmelCase ,**_lowerCAmelCase )
lowerCamelCase__ = output_ids.shape[0]
if self.framework == "pt":
lowerCamelCase__ = output_ids.reshape(_lowerCAmelCase ,out_b // in_b ,*output_ids.shape[1:] )
elif self.framework == "tf":
lowerCamelCase__ = tf.reshape(_lowerCAmelCase ,(in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=ReturnType.TEXT ,_lowerCAmelCase=False ):
lowerCamelCase__ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowerCamelCase__ = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
lowerCamelCase__ = {
F'''{self.return_name}_text''': self.tokenizer.decode(
_lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase ,clean_up_tokenization_spaces=_lowerCAmelCase ,)
}
records.append(_lowerCAmelCase )
return records
@add_end_docstrings(a )
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'summary'
def __call__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
return super().__call__(*_lowerCAmelCase ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
if max_length < min_length:
            logger.warning(F'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(a )
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'translation'
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCamelCase_ ( self ,*_lowerCAmelCase ,_lowerCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE ,_lowerCAmelCase=None ,_lowerCAmelCase=None ):
if getattr(self.tokenizer ,"""_build_translation_inputs""" ,_lowerCAmelCase ):
return self.tokenizer._build_translation_inputs(
*_lowerCAmelCase ,return_tensors=self.framework ,truncation=_lowerCAmelCase ,src_lang=_lowerCAmelCase ,tgt_lang=_lowerCAmelCase )
else:
return super()._parse_and_tokenize(*_lowerCAmelCase ,truncation=_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = super()._sanitize_parameters(**_lowerCAmelCase )
if src_lang is not None:
lowerCamelCase__ = src_lang
if tgt_lang is not None:
lowerCamelCase__ = tgt_lang
if src_lang is None and tgt_lang is None:
            # Backward compatibility; passing src_lang/tgt_lang directly is preferred.
lowerCamelCase__ = kwargs.get("""task""" ,self.task )
lowerCamelCase__ = task.split("""_""" )
if task and len(_lowerCAmelCase ) == 4:
# translation, XX, to YY
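                # e.g. task "translation_en_to_fr" splits into ["translation", "en", "to", "fr"],
                # so items[1] is the source language "en" and items[3] the target language "fr"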
lowerCamelCase__ = items[1]
lowerCamelCase__ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
return super().__call__(*_lowerCAmelCase ,**_lowerCAmelCase )
| 50 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[str]:
"""simple docstring"""
__A = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
__A = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(a_ ):
os.makedirs(a_ )
__A = model.state_dict()
def to_tf_var_name(a_ ):
for patt, repl in iter(a_ ):
__A = name.replace(a_ , a_ )
return F'''bert/{name}'''
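    # For instance, applying these patterns in order maps the PyTorch parameter name
    # "encoder.layer.0.attention.self.query.weight" to the TF variable name
    # "bert/encoder/layer_0/attention/self/query/kernel".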
def create_tf_var(a_ , a_ , a_ ):
__A = tf.dtypes.as_dtype(tensor.dtype )
__A = tf.get_variable(dtype=a_ , shape=tensor.shape , name=a_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(a_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__A = to_tf_var_name(a_ )
__A = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__A = torch_tensor.T
__A = create_tf_var(tensor=a_ , name=a_ , session=a_ )
tf.keras.backend.set_value(a_ , a_ )
__A = session.run(a_ )
print(F'''Successfully created {tf_name}: {np.allclose(a_ , a_ )}''' )
__A = tf.train.Saver(tf.trainable_variables() )
saver.save(a_ , os.path.join(a_ , model_name.replace("-" , "_" ) + ".ckpt" ) )
def UpperCAmelCase ( a_=None ) -> List[Any]:
"""simple docstring"""
__A = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=a_ , required=a_ , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=a_ , default=a_ , required=a_ , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=a_ , required=a_ , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=a_ , required=a_ , help="Directory in which to save tensorflow model" )
__A = parser.parse_args(a_ )
__A = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=a_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 55 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["torch", "transformers", "onnx"]
def __init__( self : Tuple , *a__ : Dict , **a__ : Optional[Any] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : List[str] , *a__ : Union[str, Any] , **a__ : Optional[int] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Optional[Any] , *a__ : List[Any] , **a__ : Union[str, Any] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["torch", "transformers", "onnx"]
def __init__( self : Union[str, Any] , *a__ : int , **a__ : Optional[int] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Tuple , *a__ : Union[str, Any] , **a__ : Any ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : List[Any] , *a__ : List[str] , **a__ : List[Any] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["torch", "transformers", "onnx"]
def __init__( self : Dict , *a__ : int , **a__ : str ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Tuple , *a__ : str , **a__ : Optional[int] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Optional[Any] , *a__ : int , **a__ : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["torch", "transformers", "onnx"]
def __init__( self : Optional[Any] , *a__ : Union[str, Any] , **a__ : List[str] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Optional[int] , *a__ : Union[str, Any] , **a__ : Any ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Optional[int] , *a__ : str , **a__ : int ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["torch", "transformers", "onnx"]
def __init__( self : Optional[int] , *a__ : Dict , **a__ : List[Any] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : int , *a__ : List[str] , **a__ : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : int , *a__ : int , **a__ : Any ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["torch", "transformers", "onnx"]
def __init__( self : Any , *a__ : Tuple , **a__ : int ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : int , *a__ : List[Any] , **a__ : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Dict , *a__ : Tuple , **a__ : Tuple ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 51 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE :Any = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55 | 0 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ :list) -> float:
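    # Doctest examples so the doctest.testmod() call below actually exercises this function:
    """
    >>> __A([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> __A([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """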
if not nums:
raise ValueError('''List is empty''')
return sum(a_) / len(a_)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE :int = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def UpperCAmelCase ( a_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
__A = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
    # Each of these blocks contains the details for a single job listing
for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
__A = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
__A = job.find("span" , {"class": "company"} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 55 | 0 |
import os
import time
import numpy as np
import onnxruntime as ort
_snake_case : Dict = '1'
_snake_case : str = '0'
_snake_case : Optional[Any] = '1'
_snake_case : int = ort.SessionOptions()
_snake_case : Optional[Any] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
_snake_case : List[Any] = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
_snake_case : Any = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
_snake_case : Tuple = ort.RunOptions()
_snake_case : Any = 128
_snake_case : str = 1
_snake_case : Tuple = np.ones((batch, sequence), dtype=np.intaa)
_snake_case : int = np.ones((batch, sequence), dtype=np.intaa)
_snake_case : List[str] = np.ones((batch, sequence), dtype=np.intaa)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
_snake_case : str = time.time()
_snake_case : Tuple = 2000
_snake_case : Any = {}
for _ in range(max_iters):
_snake_case : Tuple = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
| 53 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : List[str] ):
__A = tempfile.mkdtemp()
__A = BlipImageProcessor()
__A = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
__A = BlipaProcessor(A ,A )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict ,**A : int ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**A ).tokenizer
def UpperCamelCase_ ( self : Dict ,**A : Optional[int] ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**A ).image_processor
def UpperCamelCase_ ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self : Optional[int] ):
__A = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
__A = [Image.fromarray(np.moveaxis(A ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self : Any ):
__A = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
__A = self.get_image_processor(do_normalize=A ,padding_value=1.0 )
__A = BlipaProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=A ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,A )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,A )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipaProcessor(tokenizer=A ,image_processor=A )
__A = self.prepare_image_inputs()
__A = image_processor(A ,return_tensors="np" )
__A = processor(images=A ,return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase_ ( self : Tuple ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipaProcessor(tokenizer=A ,image_processor=A )
__A = "lower newer"
__A = processor(text=A )
__A = tokenizer(A ,return_token_type_ids=A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase_ ( self : int ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipaProcessor(tokenizer=A ,image_processor=A )
__A = "lower newer"
__A = self.prepare_image_inputs()
__A = processor(text=A ,images=A )
self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipaProcessor(tokenizer=A ,image_processor=A )
__A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A = processor.batch_decode(A )
__A = tokenizer.batch_decode(A )
self.assertListEqual(A ,A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipaProcessor(tokenizer=A ,image_processor=A )
__A = "lower newer"
__A = self.prepare_image_inputs()
__A = processor(text=A ,images=A )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] )
| 55 | 0 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class A ( tf.keras.layers.Layer ):
def __init__( self: Optional[Any] , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: int = None , _lowerCAmelCase: int = None ) -> List[Any]:
'''simple docstring'''
super().__init__()
UpperCAmelCase_ =pad_token_id
UpperCAmelCase_ =max_length
UpperCAmelCase_ =vocab
UpperCAmelCase_ =merges
UpperCAmelCase_ =BytePairTokenizer(_lowerCAmelCase , _lowerCAmelCase , sequence_length=_lowerCAmelCase )
@classmethod
def lowerCAmelCase__ ( cls: Union[str, Any] , _lowerCAmelCase: GPTaTokenizer , *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: Dict ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[" ".join(_lowerCAmelCase ) for m in tokenizer.bpe_ranks.keys()]
UpperCAmelCase_ =tokenizer.get_vocab()
return cls(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def lowerCAmelCase__ ( cls: Dict , _lowerCAmelCase: Union[str, os.PathLike] , *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =GPTaTokenizer.from_pretrained(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
return cls.from_tokenizer(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def lowerCAmelCase__ ( cls: List[str] , _lowerCAmelCase: Optional[int] ) -> List[Any]:
'''simple docstring'''
return cls(**_lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict ) -> int:
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: int = None ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.tf_tokenizer(_lowerCAmelCase )
UpperCAmelCase_ =tf.ones_like(_lowerCAmelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
UpperCAmelCase_ =max_length if max_length is not None else self.max_length
if max_length is not None:
UpperCAmelCase_ , UpperCAmelCase_ =pad_model_inputs(
_lowerCAmelCase , max_seq_length=_lowerCAmelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,A : Any ,A : List[str] ,A : Union[str, Any]=10_24 ,A : int=10_24 ,A : Optional[Any]=3.6 ):
__A = tokenizer
__A = tokenizer.bos_token_id
__A = dataset
__A = seq_length
__A = seq_length * chars_per_token * num_of_sequences
def __iter__( self : List[Any] ):
__A = iter(self.dataset )
__A = True
while more_examples:
__A , __A = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(A )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
__A = False
break
__A = tokenizer(A ,truncation=A )["input_ids"]
__A = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 ,len(A ) ,self.seq_length ):
__A = all_token_ids[i : i + self.seq_length]
if len(A ) == self.seq_length:
yield torch.tensor(A )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
__A = {"streaming": True}
__A = load_dataset(args.dataset_name , split="train" , **a_ )
__A = ConstantLengthDataset(a_ , a_ , seq_length=args.seq_length )
__A = DataLoader(a_ , batch_size=args.batch_size )
return eval_dataloader
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
model.eval()
__A = []
for step, batch in enumerate(a_ ):
with torch.no_grad():
__A = model(a_ , labels=a_ )
__A = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(a_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
__A = torch.mean(torch.cat(a_ ) )
try:
__A = torch.exp(a_ )
except OverflowError:
__A = float("inf" )
return loss.item(), perplexity.item()
# Setup Accelerator
SCREAMING_SNAKE_CASE :Optional[int] = Accelerator()
# Parse configuration
SCREAMING_SNAKE_CASE :str = HfArgumentParser(EvaluationArguments)
SCREAMING_SNAKE_CASE :int = parser.parse_args()
set_seed(args.seed)
# Logging
SCREAMING_SNAKE_CASE :Dict = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
SCREAMING_SNAKE_CASE :List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
SCREAMING_SNAKE_CASE :int = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
SCREAMING_SNAKE_CASE :List[str] = create_dataloader(args)
# Prepare everything with our `accelerator`.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :List[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :List[Any] = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 55 | 0 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_a , _a , _a : Union[str, Any] = False, False, False
@dataclass
class _lowercase :
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : bool = True
_SCREAMING_SNAKE_CASE : bool = True
_SCREAMING_SNAKE_CASE : Optional[str] = None
# Automatically constructed
_SCREAMING_SNAKE_CASE : ClassVar[str] = "dict"
_SCREAMING_SNAKE_CASE : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
_SCREAMING_SNAKE_CASE : str = field(default="Audio" , init=__lowercase , repr=__lowercase )
def __call__( self : Optional[Any] ) -> Tuple:
return self.pa_type
def a ( self : int , SCREAMING_SNAKE_CASE_ : Union[str, bytes, dict] ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return {"bytes": None, "path": value}
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__snake_case = BytesIO()
sf.write(SCREAMING_SNAKE_CASE_ , value['array'] , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
                    # Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
                # If the raw PCM bytes are already available, use them directly instead of re-reading the file
__snake_case = np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 3_2767
else:
__snake_case = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 3_2767
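                # Dividing by 32767 rescales the 16-bit signed PCM samples to floats in roughly [-1.0, 1.0]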
__snake_case = BytesIO(bytes() )
sf.write(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def a ( self : Tuple , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
__snake_case , __snake_case = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
__snake_case = xsplitext(SCREAMING_SNAKE_CASE_ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
if file is None:
__snake_case = token_per_repo_id or {}
__snake_case = path.split('::' )[-1]
try:
__snake_case = string_to_dict(SCREAMING_SNAKE_CASE_ , config.HUB_DATASETS_URL )['repo_id']
__snake_case = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__snake_case = None
with xopen(SCREAMING_SNAKE_CASE_ , 'rb' , use_auth_token=SCREAMING_SNAKE_CASE_ ) as f:
__snake_case , __snake_case = sf.read(SCREAMING_SNAKE_CASE_ )
else:
__snake_case , __snake_case = sf.read(SCREAMING_SNAKE_CASE_ )
__snake_case = array.T
if self.mono:
__snake_case = librosa.to_mono(SCREAMING_SNAKE_CASE_ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__snake_case = librosa.resample(SCREAMING_SNAKE_CASE_ , orig_sr=SCREAMING_SNAKE_CASE_ , target_sr=self.sampling_rate )
__snake_case = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def a ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
def a ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
__snake_case = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.binary() )
__snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__snake_case = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.string() )
__snake_case = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
__snake_case = pa.array([Audio().encode_example(SCREAMING_SNAKE_CASE_ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
__snake_case = storage.field('bytes' )
else:
__snake_case = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
__snake_case = storage.field('path' )
else:
__snake_case = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.string() )
__snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
return array_cast(SCREAMING_SNAKE_CASE_ , self.pa_type )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : pa.StructArray ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(SCREAMING_SNAKE_CASE_ : int ):
with xopen(SCREAMING_SNAKE_CASE_ , 'rb' ) as f:
__snake_case = f.read()
return bytes_
__snake_case = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__snake_case = pa.array(
[os.path.basename(SCREAMING_SNAKE_CASE_ ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
__snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(SCREAMING_SNAKE_CASE_ , self.pa_type )
| 56 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = LayoutLMTokenizer
snake_case_ = LayoutLMTokenizerFast
snake_case_ = True
snake_case_ = True
def UpperCamelCase_ ( self : Any ):
super().setUp()
__A = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ ( self : Tuple ,**A : int ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**A )
def UpperCamelCase_ ( self : Optional[Any] ,A : Any ):
__A = "UNwant\u00E9d,running"
__A = "unwanted, running"
return input_text, output_text
def UpperCamelCase_ ( self : str ):
__A = self.tokenizer_class(self.vocab_file )
__A = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(A ,["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[7, 4, 5, 10, 8, 9] )
def UpperCamelCase_ ( self : int ):
pass
| 55 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A_ : Optional[Any] = 16
A_ : Tuple = 32
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ = 1_6 , UpperCAmelCase__ = "bert-base-cased" ) -> Optional[int]:
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
UpperCamelCase_: int = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCAmelCase__ ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase_: int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase_: Union[str, Any] = datasets.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCAmelCase__ )
    # We also rename the 'label' column to 'labels', which is the name expected by
    # models in the transformers library
UpperCamelCase_: Optional[int] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCAmelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase__ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return tokenizer.pad(UpperCAmelCase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
UpperCamelCase_: Optional[int] = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
UpperCamelCase_: List[Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
return train_dataloader, eval_dataloader
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]:
model.eval()
UpperCamelCase_: Any = 0
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase_: Dict = model(**UpperCAmelCase__ )
UpperCamelCase_: List[Any] = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
UpperCamelCase_ ,UpperCamelCase_: Dict = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase__ ) - 1:
UpperCamelCase_: List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase_: Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , )
UpperCamelCase_: Any = metric.compute()
return eval_metric["accuracy"]
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> str:
# Initialize accelerator
UpperCamelCase_: Tuple = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase_: int = config['lr']
UpperCamelCase_: List[Any] = int(config['num_epochs'] )
UpperCamelCase_: Dict = int(config['seed'] )
UpperCamelCase_: Optional[Any] = int(config['batch_size'] )
UpperCamelCase_: int = args.model_name_or_path
set_seed(UpperCAmelCase__ )
UpperCamelCase_ ,UpperCamelCase_: List[str] = get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase_: Dict = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ , return_dict=UpperCAmelCase__ )
# Instantiate optimizer
UpperCamelCase_: List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase_: Any = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase__ )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase_: Dict = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
UpperCamelCase_: List[Any] = 1
UpperCamelCase_: List[Any] = (len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps
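    # e.g. 1000 batches per epoch, 2 epochs and gradient accumulation of 4 give (1000 * 2) // 4 = 500 steps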
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase_: Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase__ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase__ , )
else:
UpperCamelCase_: Union[str, Any] = DummyScheduler(UpperCAmelCase__ , total_num_steps=UpperCAmelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Optional[int] = accelerator.prepare(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase_: Optional[Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
UpperCamelCase_: str = 0
UpperCamelCase_: int = evaluate.load('glue' , 'mrpc' )
UpperCamelCase_: int = num_epochs
if args.partial_train_epoch is not None:
UpperCamelCase_: Union[str, Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCamelCase_: Union[str, Any] = args.resume_from_checkpoint.split('epoch_' )[1]
UpperCamelCase_: Tuple = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCamelCase_: Optional[int] = int(UpperCAmelCase__ ) + 1
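        # e.g. resuming from a folder named "epoch_3" gives state_epoch_num "3", so training restarts at epoch 4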
UpperCamelCase_: str = evaluation_loop(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
accelerator.print('resumed checkpoint performance:' , UpperCAmelCase__ )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , 'r' ) as f:
UpperCamelCase_: Dict = json.load(UpperCAmelCase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCamelCase_: Any = {}
for epoch in range(UpperCAmelCase__ , UpperCAmelCase__ ):
model.train()
for step, batch in enumerate(UpperCAmelCase__ ):
UpperCamelCase_: str = model(**UpperCAmelCase__ )
UpperCamelCase_: List[str] = outputs.loss
UpperCamelCase_: str = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCamelCase_: Tuple = F'''epoch_{epoch}'''
UpperCamelCase_: Union[str, Any] = os.path.join(args.output_dir , UpperCAmelCase__ )
accelerator.save_state(UpperCAmelCase__ )
UpperCamelCase_: List[Any] = evaluation_loop(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = accuracy
UpperCamelCase_: List[str] = lr_scheduler.get_lr()[0]
UpperCamelCase_: Optional[Any] = optimizer.param_groups[0]['lr']
UpperCamelCase_: Dict = epoch
UpperCamelCase_: Optional[int] = overall_step
accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , 'w' ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case () -> Optional[Any]:
    UpperCamelCase_: Optional[int] = argparse.ArgumentParser(description='Simple example of a training script that checkpoints each epoch and supports resuming.' )
parser.add_argument(
'--model_name_or_path' , type=UpperCAmelCase__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase__ , )
parser.add_argument(
'--output_dir' , type=UpperCAmelCase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=UpperCAmelCase__ , default=2 , help='Number of train epochs.' , )
UpperCamelCase_: str = parser.parse_args()
UpperCamelCase_: Dict = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
training_function(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 57 |
SCREAMING_SNAKE_CASE :int = {str(digit): digit**5 for digit in range(10)}
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(a_ ) )
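    # e.g. 4150 -> 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150, so 4150 equals
    # the sum of the fifth powers of its digits and is counted by the solution below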
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(a_ ) )
if __name__ == "__main__":
print(solution())
| 55 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : int = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''data2vec-text'''
def __init__( self , _lowercase=3_0_5_2_2 , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase="absolute" , _lowercase=True , _lowercase=None , **_lowercase , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
snake_case_ : List[Any] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Any = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : Tuple = hidden_act
snake_case_ : str = intermediate_size
snake_case_ : Dict = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : str = type_vocab_size
snake_case_ : Tuple = initializer_range
snake_case_ : Optional[Any] = layer_norm_eps
snake_case_ : Optional[int] = position_embedding_type
snake_case_ : List[Any] = use_cache
snake_case_ : Tuple = classifier_dropout
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Optional[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 58 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
__A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A ,"tf_padding" ) )
self.parent.assertTrue(hasattr(A ,"depth_multiplier" ) )
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def UpperCamelCase_ ( self : Any ):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 55 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 59 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
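
        # Worked example with the defaults above: image_size=4 and patch_size=2 give
        # (4 // 2) ** 2 + 1 = 5 image tokens, so with text_seq_length=7 the total
        # seq_length is 7 + 5 = 12.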
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
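
                # Illustrative example: a sampled box [x0=9, y0=8, x1=3, y1=2] is swapped
                # twice by the checks above and ends up as the valid box [3, 2, 9, 8]
                # (x0 <= x1 and y0 <= y1).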
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 55 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1_024,
}


class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
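
    # Layout sketch (hedged editorial note): for a sequence pair A and B the model input is
    # <s> A </s> </s> B </s>, and create_token_type_ids_from_sequences returns all zeros
    # because MVP, like the BART family it derives from, does not use token type ids.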
| 60 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = BeitImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : List[Any] ):
__A = BeitImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : int ):
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A ,"do_resize" ) )
self.assertTrue(hasattr(A ,"size" ) )
self.assertTrue(hasattr(A ,"do_center_crop" ) )
self.assertTrue(hasattr(A ,"center_crop" ) )
self.assertTrue(hasattr(A ,"do_normalize" ) )
self.assertTrue(hasattr(A ,"image_mean" ) )
self.assertTrue(hasattr(A ,"image_std" ) )
def UpperCamelCase_ ( self : List[str] ):
__A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size ,{"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels ,A )
__A = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,crop_size=84 ,reduce_labels=A )
self.assertEqual(image_processor.size ,{"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels ,A )
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
    def test_call_numpy(self):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
for image in image_inputs:
self.assertIsInstance(A ,np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
    def test_call_pytorch(self):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
    def test_call_segmentation_maps(self):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
__A = []
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__A = image_processing(image_inputs[0] ,maps[0] ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test not batched input (PIL images)
__A , __A = prepare_semantic_single_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
# Test batched input (PIL images)
__A , __A = prepare_semantic_batch_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape ,(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(
encoding["labels"].shape ,(
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
self.assertEqual(encoding["labels"].dtype ,torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
    def test_reduce_labels(self):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__A , __A = prepare_semantic_single_inputs()
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 1_50 )
__A = True
__A = image_processing(A ,A ,return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_55 )
| 55 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
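
# Illustrative walk-through of set_recursively: with key "encoder.layers.0.attention.k_proj"
# and weight_type "weight", the getattr loop descends hf_pointer step by step
# (encoder -> layers -> 0 -> attention -> k_proj) and finally copies `value`
# into hf_pointer.weight.data after the shape check.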
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
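
    # Example (hedged): a fairseq name like "conv_layers.0.0.weight" yields name="0.0.weight",
    # layer_id=0 and type_id=0 (the conv itself); type_id == 2 corresponds to the
    # layer-norm entries handled below.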
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=1_6000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 61 |
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian probability density function at ``x``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
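
# Example values (easy to verify by hand):
# gaussian(0) == 1 / sqrt(2 * pi) ≈ 0.3989422804014327, the peak of the standard normal;
# gaussian(2, mu=2, sigma=1) gives the same value, since x sits exactly at the mean.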
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55 | 0 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort over the module-level graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
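
# For the graph above the call prints ['c', 'd', 'e', 'b', 'a']: every vertex is emitted
# after its children, so reversing the list yields a conventional topological order.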
| 62 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
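
        # Hedged note: after replicate/shard, `params` is copied once per device while each
        # sharded array gains a leading device axis, i.e. (num_devices, batch_per_device, ...),
        # which is the layout the jit-compiled, pmap-backed pipeline call below expects.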
        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 55 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
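
# Editorial note (hedged): with `_LazyModule` installed in `sys.modules`, the heavy backends
# (torch/flax/tf) are only imported when one of the names above is first accessed, which keeps
# `import transformers` cheap even when all three frameworks are installed.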
| 63 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 55 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
logger = logging.get_logger(__name__)


class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 64 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
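
# Minimal usage sketch (illustrative feature names, not from this file):
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# task = ImageClassification().align_with_features(features)
# task.label_schema["labels"].names  ->  ["cat", "dog"]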
| 55 | 0 |
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_cache(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
def __lowercase ( self : List[str] ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
try:
UpperCAmelCase__ : List[str] = tempfile.mktemp()
with open(A ,"""wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ,A )
UpperCAmelCase__ : Any = AlbertTokenizer.from_pretrained(A )
finally:
os.remove(A )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" ,"""wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" ,A )
UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
            # The tiny random BERT has a vocab size of 1024; tiny GPT-2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase__ : Tuple = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class __lowercase ( unittest.TestCase ):
snake_case_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def __lowercase ( cls : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = TOKEN
HfFolder.save_token(A )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def __lowercase ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : List[Any] = os.path.join(A ,"""vocab.txt""" )
with open(A ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase__ : List[Any] = BertTokenizer(A )
tokenizer.push_to_hub("""test-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase__ : List[Any] = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A ,repo_id="""test-tokenizer""" ,push_to_hub=A ,use_auth_token=self._token )
UpperCAmelCase__ : str = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def __lowercase ( self : List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Dict = os.path.join(A ,"""vocab.txt""" )
with open(A ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase__ : Union[str, Any] = BertTokenizer(A )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" ,use_auth_token=self._token )
UpperCAmelCase__ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A ,repo_id="""valid_org/test-tokenizer-org""" ,push_to_hub=A ,use_auth_token=self._token )
UpperCAmelCase__ : List[str] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def __lowercase ( self : str ):
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Optional[Any] = os.path.join(A ,"""vocab.txt""" )
with open(A ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase__ : Tuple = CustomTokenizer(A )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer" ,trust_remote_code=A )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : List[str] = os.path.join(A ,"""vocab.txt""" )
with open(A ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCAmelCase__ : Dict = BertTokenizerFast.from_pretrained(A )
bert_tokenizer.save_pretrained(A )
UpperCAmelCase__ : Dict = CustomTokenizerFast.from_pretrained(A )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" ,use_auth_token=self._token )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer" ,trust_remote_code=A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizerFast""" )
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(
f"{USER}/test-dynamic-tokenizer" ,use_fast=A ,trust_remote_code=A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"""CustomTokenizer""" )
class __lowercase ( unittest.TestCase ):
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
self.assertEqual(trie.data ,{"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) ,["""[CLS]""", """ This is a """, """extra_id_100"""] )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) ,["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) ,["""BC""", """A"""] )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) ,["""This is something """, """[SPECIAL_TOKEN]"""] )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) ,["""AB""", """C"""] )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) ,["""ABC""", """D"""] )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCAmelCase__ : Dict = Trie()
UpperCAmelCase__ : Optional[int] = trie.cut_text("""ABC""" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(A ,["""AB""", """C"""] )
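# Non-test sketch of the Trie API exercised above (assumes
# transformers.tokenization_utils.Trie):
#
#   trie = Trie()
#   trie.add("[CLS]")
#   trie.add("extra_id_100")
#   trie.split("[CLS] This is a extra_id_100")
#   # -> ["[CLS]", " This is a ", "extra_id_100"]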
| 65 |
from math import sqrt
def UpperCAmelCase ( a_ ) -> bool:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (
number >= 0
), "'number' must been an int and positive"
__A = True
# 0 and 1 are none primes.
if number <= 1:
__A = False
for divisor in range(2 , int(round(sqrt(a_ ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
__A = False
break
# precondition
assert isinstance(a_ , a_ ), "'status' must been from type bool"
return status
def UpperCAmelCase ( a_ ) -> Any:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n > 2), "'N' must been an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
__A = list(range(2 , n + 1 ) )
    __A = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(a_ ) ):
for j in range(i + 1 , len(a_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__A = 0
# filters actual prime numbers.
__A = [x for x in begin_list if x != 0]
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n > 2), "'N' must been an int and > 2"
__A = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(a_ ):
ans.append(a_ )
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
assert isinstance(a_ , a_ ) and number >= 0, "'number' must been an int and >= 0"
    __A = [] # this list will be returned by the function.
# potential prime number factors.
__A = 2
__A = number
if number == 0 or number == 1:
ans.append(a_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(a_ ):
while quotient != 1:
if is_prime(a_ ) and (quotient % factor == 0):
ans.append(a_ )
quotient /= factor
else:
factor += 1
else:
ans.append(a_ )
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type list"
return ans
def UpperCAmelCase ( a_ ) -> Any:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__A = 0
# prime factorization of 'number'
__A = prime_factorization(a_ )
__A = max(a_ )
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type int"
return ans
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__A = 0
# prime factorization of 'number'
__A = prime_factorization(a_ )
__A = min(a_ )
# precondition
assert isinstance(a_ , a_ ), "'ans' must been from type int"
return ans
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
assert isinstance(a_ , a_ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , a_ ), "compare must been from type bool"
return number % 2 == 0
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
assert isinstance(a_ , a_ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , a_ ), "compare must been from type bool"
return number % 2 != 0
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(a_ , a_ ) and (number > 2) and is_even(a_ )
), "'number' must been an int, even and > 2"
    __A = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
__A = get_prime_numbers(a_ )
__A = len(a_ )
# run variable for while-loops.
__A = 0
__A = None
    # exit variable, used to break out of the loops
__A = True
while i < len_pn and loop:
__A = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__A = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(a_ , a_ )
and (len(a_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
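# Worked example for the pair search above: for number = 28 neither 2 nor 3
# pairs with a prime in the list to reach 28, so the first hit is
# 5 + 23 == 28 and the function returns [5, 23].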
def UpperCAmelCase ( a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
assert (
isinstance(a_ , a_ )
and isinstance(a_ , a_ )
and (numbera >= 0)
        and (numberb >= 0)
), "'number1' and 'number2' must been positive integer."
__A = 0
    while numberb != 0:
        __A = numbera % numberb
        __A = numberb
__A = rest
# precondition
assert isinstance(a_ , a_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def UpperCAmelCase ( a_ , a_ ) -> List[str]:
"""simple docstring"""
assert (
isinstance(a_ , a_ )
and isinstance(a_ , a_ )
and (numbera >= 1)
        and (numberb >= 1)
), "'number1' and 'number2' must been positive integer."
    __A = 1 # actual answer that will be returned.
# for kgV (x,1)
    if numbera > 1 and numberb > 1:
# builds the prime factorization of 'number1' and 'number2'
__A = prime_factorization(a_ )
__A = prime_factorization(a_ )
    elif numbera == 1 or numberb == 1:
__A = []
__A = []
__A = max(a_ , a_ )
__A = 0
__A = 0
    __A = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
            if n in prime_fac_b:
                __A = prime_fac_a.count(a_ )
                __A = prime_fac_b.count(a_ )
for _ in range(max(a_ , a_ ) ):
ans *= n
else:
__A = prime_fac_a.count(a_ )
for _ in range(a_ ):
ans *= n
done.append(a_ )
# iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            __A = prime_fac_b.count(a_ )
for _ in range(a_ ):
ans *= n
done.append(a_ )
# precondition
assert isinstance(a_ , a_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n >= 0), "'number' must been a positive int"
__A = 0
__A = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
        # if 'ans' is not prime,
        # advance to the next prime number.
while not is_prime(a_ ):
ans += 1
# precondition
assert isinstance(a_ , a_ ) and is_prime(
a_ ), "'ans' must been a prime number and from type int"
return ans
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
assert (
is_prime(a_ ) and is_prime(a_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__A = p_number_a + 1 # jump to the next number
    __A = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(a_ ):
number += 1
while number < p_number_a:
ans.append(a_ )
number += 1
# fetch the next prime number.
while not is_prime(a_ ):
number += 1
# precondition
assert (
isinstance(a_ , a_ )
and ans[0] != p_number_a
and ans[len(a_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n >= 1), "'n' must been int and >= 1"
__A = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(a_ )
# precondition
assert ans[0] == 1 and ans[len(a_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (
number > 1
), "'number' must been an int and >= 1"
__A = get_divisors(a_ )
# precondition
assert (
isinstance(a_ , a_ )
and (divisors[0] == 1)
and (divisors[len(a_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
    # sums all proper divisors (the [:-1] slice drops 'number' itself)
return sum(divisors[:-1] ) == number
def UpperCAmelCase ( a_ , a_ ) -> str:
"""simple docstring"""
assert (
isinstance(a_ , a_ )
and isinstance(a_ , a_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__A = gcd(abs(a_ ) , abs(a_ ) )
# precondition
assert (
isinstance(a_ , a_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n >= 0), "'n' must been a int and >= 0"
    __A = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
assert isinstance(a_ , a_ ) and (n >= 0), "'n' must been an int and >= 0"
__A = 0
__A = 1
    __A = 1 # this will be returned
for _ in range(n - 1 ):
__A = ans
ans += fiba
__A = tmp
return ans
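# Illustrative sanity checks, assuming the helpers above correspond to the
# usual TheAlgorithms names (is_prime, prime_factorization, gcd, kg_v, fib):
#
#   is_prime(97)              # True
#   prime_factorization(360)  # [2, 2, 2, 3, 3, 5]
#   gcd(54, 24)               # 6
#   kg_v(4, 6)                # 12, the least common multiple
#   fib(10)                   # 55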
| 55 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , ):
_lowercase : List[Any] = parent
_lowercase : List[str] = 1_3
_lowercase : str = 7
_lowercase : int = 3_0
_lowercase : Optional[Any] = self.seq_length + self.mem_len
_lowercase : List[str] = 1_5
_lowercase : Tuple = True
_lowercase : Optional[Any] = True
_lowercase : List[Any] = 9_9
_lowercase : int = [1_0, 5_0, 8_0]
_lowercase : Tuple = 3_2
_lowercase : Any = 3_2
_lowercase : int = 4
_lowercase : List[str] = 8
_lowercase : Tuple = 1_2_8
_lowercase : int = 2
_lowercase : Optional[Any] = 2
_lowercase : str = None
_lowercase : int = 1
_lowercase : Tuple = 0
_lowercase : Dict = 3
_lowercase : Union[str, Any] = self.vocab_size - 1
_lowercase : Optional[Any] = 0.01
def __a ( self ):
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : List[Any] = None
if self.use_labels:
_lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Any = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __a ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFTransfoXLModel(_lowerCAmelCase )
_lowercase , _lowercase : Union[str, Any] = model(_lowerCAmelCase ).to_tuple()
_lowercase : Any = {'input_ids': input_ids_a, 'mems': mems_a}
_lowercase , _lowercase : Any = model(_lowerCAmelCase ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = TFTransfoXLLMHeadModel(_lowerCAmelCase )
_lowercase , _lowercase : List[Any] = model(_lowerCAmelCase ).to_tuple()
_lowercase : Optional[int] = {'input_ids': input_ids_a, 'labels': lm_labels}
_lowercase , _lowercase : Union[str, Any] = model(_lowerCAmelCase ).to_tuple()
_lowercase , _lowercase : Dict = model([input_ids_a, mems_a] ).to_tuple()
_lowercase : Any = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
_lowercase , _lowercase : Union[str, Any] = model(_lowerCAmelCase ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = TFTransfoXLForSequenceClassification(_lowerCAmelCase )
_lowercase : Dict = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self ):
_lowercase : Any = self.prepare_config_and_inputs()
((_lowercase) , (_lowercase) , (_lowercase) , (_lowercase)) : str = config_and_inputs
_lowercase : List[Any] = {'input_ids': input_ids_a}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : List[str] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Optional[Any] = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : int = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : str = False
_UpperCamelCase : Optional[Any] = False
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __a ( self ):
_lowercase : Optional[Any] = TFTransfoXLModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , d_embed=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
self.model_tester.set_seed()
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*_lowerCAmelCase )
def __a ( self ):
self.model_tester.set_seed()
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Optional[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowercase : int = model_class(_lowerCAmelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowercase : Dict = model.get_output_embeddings()
assert isinstance(_lowerCAmelCase , tf.keras.layers.Layer )
_lowercase : List[Any] = model.get_bias()
assert name is None
else:
_lowercase : List[Any] = model.get_output_embeddings()
assert x is None
_lowercase : int = model.get_bias()
assert name is None
def __a ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __a ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Union[str, Any] = TFTransfoXLModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def __a ( self ):
pass
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
def __a ( self ):
_lowercase : str = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
_lowercase : Union[str, Any] = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowercase : Optional[int] = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowercase : Union[str, Any] = model.generate(_lowerCAmelCase , max_length=2_0_0 , do_sample=_lowerCAmelCase )
self.assertListEqual(output_ids[0].numpy().tolist() , _lowerCAmelCase )
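# Hedged sketch of the generation call exercised in the integration test
# above (checkpoint name taken from the test itself):
#
#   model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
#   output_ids = model.generate(input_ids, max_length=200, do_sample=False)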
| 66 |
import os
def UpperCAmelCase ( ) -> Any:
"""simple docstring"""
__A = os.path.dirname(os.path.realpath(a_ ) )
__A = os.path.join(a_ , "triangle.txt" )
with open(a_ ) as f:
__A = f.readlines()
__A = []
for line in triangle:
__A = []
for number in line.strip().split(" " ):
numbers_from_line.append(int(a_ ) )
a.append(a_ )
for i in range(1 , len(a_ ) ):
for j in range(len(a[i] ) ):
__A = a[i - 1][j] if j != len(a[i - 1] ) else 0
__A = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(a_ , a_ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
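# Worked mini-example of the same top-down accumulation on a 3-row triangle
# (independent of triangle.txt):
#
#   a = [[3], [7, 4], [2, 4, 6]]
#   # after the nested loops: a == [[3], [10, 7], [12, 14, 13]]
#   # max(a[-1]) == 14, i.e. the path 3 -> 7 -> 4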
| 55 | 0 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list[int | float] , snake_case__ :int , snake_case__ :int ) -> int | float:
if len(snake_case__ ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(snake_case__ )
or left < -len(snake_case__ )
or right >= len(snake_case__ )
or right < -len(snake_case__ )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
_lowercase = (left + right) >> 1 # the middle
_lowercase = find_max(snake_case__ , snake_case__ , snake_case__ ) # find max in range[left, mid]
_lowercase = find_max(snake_case__ , mid + 1 , snake_case__ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
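# Example (hedged; assumes the recursive helper above is `find_max`):
#
#   find_max([2, 8, 3, 5], 0, 3)  # -> 8, searching the full index range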
| 67 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
SCREAMING_SNAKE_CASE :Union[str, Any] = object()
# For specifying empty leaf dict `{}`
SCREAMING_SNAKE_CASE :List[str] = object()
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
__A = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(a_ ) - len(a_ ) + 1 ):
__A = [x.match(a_ ) for x, y in zip(a_ , ks[i:] )]
if matches and all(a_ ):
return True
return False
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
def replace(a_ , a_ ):
for rule, replacement in rules:
if _match(a_ , a_ ):
return replacement
return val
return replace
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , a_ )),
(("transformer", "wte", "embedding"), P("mp" , a_ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(a_ , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , a_ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(a_ , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , a_ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
__A = _get_partition_rules()
__A = _replacement_rules(a_ )
__A = {k: _unmatched for k in flatten_dict(a_ )}
__A = {k: replace(a_ , a_ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(a_ ) )
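# Hedged usage sketch: assuming the final helper above is `set_partitions`
# from the transformers model-parallel example, it maps a flax GPT-style
# parameter tree to a matching tree of PartitionSpecs for pjit:
#
#   param_specs = set_partitions(model.params)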
| 55 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A = logging.get_logger(__name__)
__A = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class _A ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'bit'
lowerCamelCase : List[Any] = ['preactivation', 'bottleneck']
lowerCamelCase : List[Any] = ['SAME', 'VALID']
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : List[str]=64 , __SCREAMING_SNAKE_CASE : Tuple=[256, 512, 1024, 2048] , __SCREAMING_SNAKE_CASE : Optional[Any]=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE : Optional[Any]="preactivation" , __SCREAMING_SNAKE_CASE : int="relu" , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Tuple=32 , __SCREAMING_SNAKE_CASE : Tuple=0.0 , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : List[str]=1 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Tuple , ) -> List[str]:
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__UpperCAmelCase =global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
__UpperCAmelCase =num_channels
__UpperCAmelCase =embedding_size
__UpperCAmelCase =hidden_sizes
__UpperCAmelCase =depths
__UpperCAmelCase =layer_type
__UpperCAmelCase =hidden_act
__UpperCAmelCase =global_padding
__UpperCAmelCase =num_groups
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =embedding_dynamic_padding
__UpperCAmelCase =output_stride
__UpperCAmelCase =width_factor
__UpperCAmelCase =["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__UpperCAmelCase , __UpperCAmelCase =get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
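# Minimal instantiation sketch (argument values are illustrative):
#
#   config = BitConfig(layer_type="preactivation", global_padding="SAME")
#   config.out_features  # defaults to the last stage when none are given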
| 68 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] ,A : Union[str, Any] ,A : List[Any]=13 ,A : Optional[Any]=30 ,A : Union[str, Any]=2 ,A : Union[str, Any]=3 ,A : Any=True ,A : Dict=True ,A : str=32 ,A : Tuple=2 ,A : Optional[int]=4 ,A : Tuple=37 ,A : List[Any]="gelu" ,A : Dict=0.1 ,A : Optional[int]=0.1 ,A : List[Any]=10 ,A : Optional[Any]=0.02 ,A : Dict=3 ,A : Dict=None ,A : List[Any]=2 ,):
__A = parent
__A = batch_size
__A = image_size
__A = patch_size
__A = num_channels
__A = is_training
__A = use_labels
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = type_sequence_label_size
__A = initializer_range
__A = scope
__A = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__A = (image_size // patch_size) ** 2
__A = num_patches + 2
def UpperCamelCase_ ( self : List[Any] ):
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__A = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : Optional[int] ):
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCamelCase_ ( self : List[str] ,A : List[Any] ,A : Optional[int] ,A : Union[str, Any] ):
__A = TFDeiTModel(config=A )
__A = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : List[Any] ,A : List[Any] ,A : Optional[Any] ,A : Dict ):
__A = TFDeiTForMaskedImageModeling(config=A )
__A = model(A )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__A = 1
__A = TFDeiTForMaskedImageModeling(A )
__A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A = model(A )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase_ ( self : Optional[Any] ,A : Union[str, Any] ,A : Dict ,A : Union[str, Any] ):
__A = self.type_sequence_label_size
__A = TFDeiTForImageClassification(A )
__A = model(A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__A = 1
__A = TFDeiTForImageClassification(A )
__A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A = model(A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : str ):
__A = self.prepare_config_and_inputs()
__A , __A , __A = config_and_inputs
__A = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def UpperCamelCase_ ( self : str ):
__A = TFDeiTModelTester(self )
__A = ConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 )
def UpperCamelCase_ ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def UpperCamelCase_ ( self : Union[str, Any] ):
pass
def UpperCamelCase_ ( self : List[Any] ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(A )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
__A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A ,tf.keras.layers.Dense ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(A )
__A = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,A )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self : Optional[int] ,A : Union[str, Any] ,A : List[str] ,A : Optional[Any]=False ):
__A = super()._prepare_for_class(A ,A ,return_labels=A )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def UpperCamelCase_ ( self : Any ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = TFDeiTModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase ( ) -> str:
"""simple docstring"""
__A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : int ):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : Optional[int] ):
__A = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="tf" )
# forward pass
__A = model(**A )
# verify the logits
__A = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape ,A )
__A = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
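# Standalone inference sketch mirroring the integration test above (hedged):
#
#   processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   model = TFDeiTForImageClassificationWithTeacher.from_pretrained(
#       "facebook/deit-base-distilled-patch16-224")
#   logits = model(**processor(images=image, return_tensors="tf")).logits  # shape (1, 1000)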
| 55 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Dict = logging.get_logger(__name__)
a : Union[str, Any] = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """efficientnet"""
def __init__( self : Optional[int] , a_ : int = 3 , a_ : int = 600 , a_ : float = 2.0 , a_ : float = 3.1 , a_ : int = 8 , a_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , a_ : List[int] = [32, 16, 24, 40, 80, 112, 192] , a_ : List[int] = [16, 24, 40, 80, 112, 192, 320] , a_ : List[int] = [] , a_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , a_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , a_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , a_ : float = 0.25 , a_ : str = "swish" , a_ : int = 2_560 , a_ : str = "mean" , a_ : float = 0.02 , a_ : float = 0.001 , a_ : float = 0.99 , a_ : float = 0.5 , a_ : float = 0.2 , **a_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**a_ )
__snake_case = num_channels
__snake_case = image_size
__snake_case = width_coefficient
__snake_case = depth_coefficient
__snake_case = depth_divisor
__snake_case = kernel_sizes
__snake_case = in_channels
__snake_case = out_channels
__snake_case = depthwise_padding
__snake_case = strides
__snake_case = num_block_repeats
__snake_case = expand_ratios
__snake_case = squeeze_expansion_ratio
__snake_case = hidden_act
__snake_case = hidden_dim
__snake_case = pooling_type
__snake_case = initializer_range
__snake_case = batch_norm_eps
__snake_case = batch_norm_momentum
__snake_case = dropout_rate
__snake_case = drop_connect_rate
__snake_case = sum(a_ ) * 4
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def A ( self : List[str] ):
"""simple docstring"""
return 1e-5
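# Hedged export sketch, assuming the two classes above are
# EfficientNetConfig and its OnnxConfig companion:
#
#   onnx_config = EfficientNetOnnxConfig(EfficientNetConfig())
#   list(onnx_config.inputs)  # ["pixel_values"]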
| 69 |
SCREAMING_SNAKE_CASE :List[Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE :Union[str, Any] = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE :int = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def UpperCAmelCase ( a_ , a_ , a_ ) -> str:
"""simple docstring"""
assert len(str(a_ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 1_2, "month should be between 1 to 12"
assert 1 <= day <= 3_1, "day should be between 1 to 31"
# Doomsday algorithm:
__A = year // 1_0_0
__A = (5 * (century % 4) + 2) % 7
__A = year % 1_0_0
__A = centurian % 1_2
__A = (
(centurian // 1_2) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
__A = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_0_0) != 0)
else DOOMSDAY_LEAP[month - 1]
)
__A = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
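# Example (hedged; assumes the function above is TheAlgorithms'
# get_week_day). Note the year % 400 check: century years are leap only
# when divisible by 400, so 2000 uses the leap table while 1900 does not.
#
#   get_week_day(2020, 10, 24)  # -> 'Saturday'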
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : Dict , A_ : int=7 , A_ : Any=3 , A_ : List[str]=30 , A_ : Union[str, Any]=400 , A_ : List[str]=True , A_ : int=None , A_ : Any=True , A_ : str=1 / 255 , A_ : int=True , A_ : List[Any]=[0.5, 0.5, 0.5] , A_ : Union[str, Any]=[0.5, 0.5, 0.5] , A_ : Union[str, Any]=True , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
lowerCamelCase_ = do_pad
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def a__ ( self : Union[str, Any] , A_ : Dict , A_ : Any=False ) -> Union[str, Any]:
"""simple docstring"""
if not batched:
lowerCamelCase_ = image_inputs[0]
if isinstance(A_ , Image.Image ):
lowerCamelCase_ , lowerCamelCase_ = image.size
else:
lowerCamelCase_ , lowerCamelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase_ = int(self.size['shortest_edge'] * h / w )
lowerCamelCase_ = self.size['shortest_edge']
elif w > h:
lowerCamelCase_ = self.size['shortest_edge']
lowerCamelCase_ = int(self.size['shortest_edge'] * w / h )
else:
lowerCamelCase_ = self.size['shortest_edge']
lowerCamelCase_ = self.size['shortest_edge']
else:
lowerCamelCase_ = []
for image in image_inputs:
lowerCamelCase_ , lowerCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ = max(A_ , key=lambda A_ : item[0] )[0]
lowerCamelCase_ = max(A_ , key=lambda A_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = DetrImageProcessor if is_vision_available() else None
def a__ ( self : List[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = DetrImageProcessingTester(self )
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'do_rescale' ) )
self.assertTrue(hasattr(A_ , 'rescale_factor' ) )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , A_ )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
pass
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
lowerCamelCase_ = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(A_ , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(A_ , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'image_id': 39769, 'annotations': target}
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
lowerCamelCase_ = image_processing(images=A_ , annotations=A_ , return_tensors='pt' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A_ ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A_ , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A_ ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A_ ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A_ ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A_ ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A_ ) )
@slow
def a__ ( self : str ) -> Any:
"""simple docstring"""
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
lowerCamelCase_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
lowerCamelCase_ = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='pt' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A_ ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A_ , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A_ ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A_ ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A_ ) )
# verify masks
lowerCamelCase_ = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , A_ )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A_ ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A_ ) )
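# Hedged standalone sketch of the detection-annotation flow tested above:
#
#   processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
#   target = {"image_id": 39769, "annotations": annotations}  # COCO format
#   enc = processor(images=image, annotations=target, return_tensors="pt")
#   enc["labels"][0]["boxes"]  # normalized (center_x, center_y, width, height)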
| 70 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCAmelCase ( a_ = "isbn/0140328726" ) -> dict:
"""simple docstring"""
__A = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
__A = F'''{olid} is not a valid Open Library olid'''
raise ValueError(a_ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
SCREAMING_SNAKE_CASE :int = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
SCREAMING_SNAKE_CASE :Any = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 55 | 0 |
"""Convert SegFormer checkpoints from the original repository to the transformers format."""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    """Rename original (mmseg-style) checkpoint keys to the transformers naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
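
# Illustration (hypothetical toy key, not used by the conversion itself): an
# original mmseg-style key is mapped onto its transformers counterpart.
def _rename_keys_example() -> None:
    toy = OrderedDict({"backbone.patch_embed1.proj.weight": 0})
    renamed = rename_keys(toy, encoder_only=False)
    assert "segformer.encoder.patch_embeddings.0.proj.weight" in renamed
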
def read_in_k_v(state_dict, config):
    """Split the fused key/value projection of each block into separate key and value weights."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
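
# Minimal sketch (toy sizes, purely illustrative): the fused kv matrix stacks the
# key projection on top of the value projection, so slicing at hidden_size splits it.
def _kv_split_example() -> None:
    hidden_size = 4
    kv_weight = torch.randn(2 * hidden_size, hidden_size)
    key_weight = kv_weight[:hidden_size, :]    # first half -> key projection
    value_weight = kv_weight[hidden_size:, :]  # second half -> value projection
    assert key_weight.shape == value_weight.shape == (hidden_size, hidden_size)
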
def prepare_img():
    """Download the COCO image of two cats on which the converted model is verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint weights into our SegFormer structure."""
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_lowerCamelCase = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 71 |
import requests
giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 55 | 0 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    image_processing_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 72 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
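    # Quick sanity checks (deterministic, illustrative): the 1st prime is 2
    # and the 6th prime is 13.
    assert solution(1) == 2
    assert solution(6) == 13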
| 55 | 0 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
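
# Self-check sketch (illustrative): the function agrees with Python's built-in
# three-argument pow for a couple of sample values.
assert binary_exponentiation(3, 20, 97) == pow(3, 20, 97)
assert binary_exponentiation(2, 0, 5) == 1
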
# a prime number
p = 701

a = 1000000000
b = 10

# using the binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 73 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel as a TensorFlow v1 checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str) -> str:
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
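
# Example invocation (hypothetical script name and paths, shown for illustration):
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert/pytorch_model.bin \
#       --tf_cache_dir ./bert-tf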
| 55 | 0 |
from pathlib import Path
import fire
from tqdm import tqdm
def a__ ( snake_case="ro" , snake_case="en" , snake_case="wmt16" , snake_case=None ):
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
__SCREAMING_SNAKE_CASE : str = F'''{src_lang}-{tgt_lang}'''
print(F'''Converting {dataset}-{pair}''' )
__SCREAMING_SNAKE_CASE : Dict = datasets.load_dataset(snake_case , snake_case )
if save_dir is None:
__SCREAMING_SNAKE_CASE : List[str] = F'''{dataset}-{pair}'''
__SCREAMING_SNAKE_CASE : Dict = Path(snake_case )
save_dir.mkdir(exist_ok=snake_case )
for split in ds.keys():
print(F'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
__SCREAMING_SNAKE_CASE : Optional[int] = '''val''' if split == '''validation''' else split
__SCREAMING_SNAKE_CASE : Union[str, Any] = save_dir.joinpath(F'''{fn}.source''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = save_dir.joinpath(F'''{fn}.target''' )
__SCREAMING_SNAKE_CASE : Dict = src_path.open('''w+''' )
__SCREAMING_SNAKE_CASE : int = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__SCREAMING_SNAKE_CASE : Tuple = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(F'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
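
# Example invocation (hypothetical script name, via fire's auto-generated CLI):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en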
| 74 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 55 | 0 |
"""Breadth-first search and bidirectional breadth-first search on a grid."""
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the free grid neighbours of the given node."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parent to parent until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
UpperCamelCase__ = (0, 0)
UpperCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCamelCase__ = time.time()
UpperCamelCase__ = BreadthFirstSearch(init, goal)
UpperCamelCase__ = bfs.search()
UpperCamelCase__ = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
UpperCamelCase__ = time.time()
UpperCamelCase__ = BidirectionalBreadthFirstSearch(init, goal)
UpperCamelCase__ = bd_bfs.search()
UpperCamelCase__ = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 75 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job title, company name) pairs scraped from Indeed for the given location."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 55 | 0 |
"""simple docstring"""
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
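
# Sketch (assumption: this mirrors how a setup script typically consumes such a
# pin table): look up the pinned requirement strings for a subset of packages.
def deps_list(*pkgs: str) -> list[str]:
    return [deps[pkg] for pkg in pkgs]


assert deps_list("numpy", "tqdm") == ["numpy>=1.17", "tqdm>=4.27"]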
| 76 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ["pixel_values", "input_ids", "attention_mask"]
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 55 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 77 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
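
# Worked illustration of the metric: perplexity is exp(mean loss), so a mean
# eval loss of 2.0 corresponds to a perplexity of e**2 ≈ 7.389, i.e.
# torch.exp(torch.tensor(2.0)) -> tensor(7.3891).
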
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 55 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    """Constructs an MCTCT processor wrapping an MCTCT feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
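
# Hedged usage sketch (checkpoint name and variable contents are assumptions):
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   inputs = processor(audio=raw_speech, sampling_rate=16_000, text="a transcript", return_tensors="pt")
#   # inputs["input_features"] feeds the model; inputs["labels"] holds the tokenized transcript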
| 78 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
| 55 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = NllbTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase__ : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
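        # Editor note: convert_ids_to_tokens is not an exact inverse of tokenize here.
        # The small test fixture lacks "9" and "é", so their ids round-trip to
        # "<unk>" -- the only positions where this list differs from the tokenized one.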
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
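    # Editor note: the three save/load passes above exercise the fast tokenizer's
    # serialization modes -- the default, legacy_format=True (slow-tokenizer files,
    # no tokenizer.json), and legacy_format=False (tokenizer.json only) -- using the
    # slow tokenizer's saved files as the reference each time.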
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Longer text that will definitely require truncation.
                src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
                tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors="""pt""", src_lang="""eng_Latn""", tgt_lang="""ron_Latn""", )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)

                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="""pt""" )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="""pt""" )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("""decoder_input_ids""", batch_encoder_only)
    @unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
    def test_save_slow_from_fast_and_reload_fast(self):
        pass
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                added_tokens = [AddedToken("""<special>""", lstrip=True)]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs )
                r_output = tokenizer_r.encode("""Hey this is a <special> token""")
                special_token_id = tokenizer_r.encode("""<special>""", add_special_tokens=False )[0]
                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs, )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs )
                    p_output = tokenizer_p.encode("""Hey this is a <special> token""")
                    cr_output = tokenizer_cr.encode("""Hey this is a <special> token""")
                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest ( unittest.TestCase ):
    checkpoint_name = 'facebook/nllb-200-distilled-600M'
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="""eng_Latn""", tgt_lang="""ron_Latn""" )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""], 256057)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["""this is gunna be a long sentence """ * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""]), [256203, 3])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="""pt""", )
        batch["""decoder_input_ids"""] = shift_tokens_right(
            batch["""labels"""], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["""ron_Latn"""] )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # decoder starts with the target language code
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="""pt""" )
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="""pt""" )
        labels = targets["""input_ids"""]
        batch["""decoder_input_ids"""] = shift_tokens_right(
            labels, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang], )
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
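    # Editor sketch (paraphrase, not copied from transformers): shift_tokens_right
    # builds decoder inputs by prepending the decoder start token (here the target
    # language code) and shifting the labels one position to the right, roughly:
    #
    #   decoder_input_ids[:, 0]  = decoder_start_token_id
    #   decoder_input_ids[:, 1:] = labels[:, :-1]
    #
    # which is why decoder_input_ids above keeps the labels' max_length of 10.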
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            """A test""", return_tensors="""pt""", src_lang="""eng_Latn""", tgt_lang="""fra_Latn""" )
        self.assertEqual(
            nested_simplify(inputs), {
                # eng_Latn, A, test, EOS
                """input_ids""": [[256047, 70, 7356, 2]],
                """attention_mask""": [[1, 1, 1, 1]],
                # fra_Latn
                """forced_bos_token_id""": 256057,
            }, )
    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            """UN Chief says there is no military solution in Syria""", src_lang="""eng_Latn""", tgt_lang="""fra_Latn""" )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            """UN Chief says there is no military solution in Syria""", src_lang="""eng_Latn""", tgt_lang="""fra_Latn""" )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
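    # Editor note: the two assertions above pin down the difference between the formats:
    #   legacy_behaviour=True  -> tokens + [eos, src_lang_code]    i.e. [..., 2, 256047]
    #   legacy_behaviour=False -> [src_lang_code] + tokens + [eos] i.e. [256047, ..., 2]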
| 79 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Project Euler 30: sum all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(1_0_0_0, 1_0_0_0_0_0_0)
        if number == digits_fifth_powers_sum(number) )
if __name__ == "__main__":
print(solution())
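# Editor sketch: why the search can stop below 1_000_000. A number with d digits is at
# least 10 ** (d - 1), while its digit-fifth-power sum is at most d * 9**5; for d = 7
# that cap is 7 * 9**5 = 413_343, a six-digit number, so no number with seven or more
# digits can equal its own digit-fifth-power sum.
assert 7 * 9**5 == 413_343 < 10**6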
| 55 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")


class BartphoTokenizerTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"""unk_token""": """<unk>"""}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""monolingual_vocab_file"""])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(F'{token} {vocab_tokens[token]}\n')

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = """This is a là test"""
        output_text = """This is a<unk><unk> test"""
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = """This is a là test"""
        bpe_tokens = """▁This ▁is ▁a ▁l à ▁t est""".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
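    # Editor note (the reserved-token layout is an editor assumption): the expected ids
    # follow from BartPho's special tokens (<s>=0, <pad>=1, </s>=2, <unk>=3) preceding
    # the fixture vocabulary, so "▁This"=4 ... "est"=8, while the out-of-vocabulary
    # pieces "▁l" and "à" fall back to the <unk> id 3.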
| 80 |
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )
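    # Editor note: with the defaults above (last_hidden_size=1024, depth_multiplier=0.25),
    # the tester checks shapes against a scaled hidden size of int(1024 * 0.25) = 256 channels.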
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions" )
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
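    # Editor note: the helper above runs twice per model class on purpose -- once with
    # output_hidden_states passed as a call-time input and once with it set on the
    # config -- verifying both activation paths report the same 26 feature maps.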
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
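    # Editor note: the (1, 1001) logit shape matches the original Google MobileNet
    # checkpoints, which use 1000 ImageNet classes plus an extra "background" class
    # at index 0.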
| 55 | 0 |