"""Heun discrete scheduler: a second-order sampler (Heun's method, Karras et al. 2022) for diffusion models."""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(
    num_diffusion_timesteps: int,
    max_beta: float = 0.999,
    alpha_transform_type: str = "cosine",
) -> torch.Tensor:
    """
    Create a beta schedule that discretizes the given alpha_bar function, which defines the
    cumulative product of (1 - beta) over time from t = 0 to t = 1.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta for numerical stability
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Scheduler with Heun steps for discrete beta schedules (Algorithm 2 in Karras et al., 2022)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,  # sensible defaults
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        """Scale the model input by 1 / sqrt(sigma**2 + 1) to match the k-diffusion convention."""
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Construct the noise schedule of Karras et al. (2022):
        sigma_i = (sigma_max**(1/rho) + ramp_i * (sigma_min**(1/rho) - sigma_max**(1/rho))) ** rho
        """
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
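
# Illustrative usage sketch: a minimal denoising loop with this scheduler.
# `model` is a hypothetical noise-prediction network (anything callable as
# `model(x, t) -> Tensor`); the shapes and the 10-step schedule are arbitrary
# choices for the example.
#
#   scheduler = HeunDiscreteScheduler()
#   scheduler.set_timesteps(num_inference_steps=10)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)  # hypothetical model call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample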
"""Scrape basic product data (title, link, price, rating, MRP, discount) from Amazon search results."""
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Take a product name as input and return a DataFrame of matching products scraped from amazon.in."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # Blank out MRP and Discount for rows where no valid MRP was found
    data_frame.loc[data_frame["MRP of the product"] == "", "Discount"] = " "
    data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
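
# Practical note: this scraper needs `pip install beautifulsoup4 requests pandas`
# and depends on Amazon's current HTML class names ("s-result-item",
# "a-offscreen", ...); if the markup changes or the request is rate-limited,
# the selectors above will simply match nothing.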
"""
In this problem, we want to determine all possible combinations of k numbers
out of 1 ... n. We use backtracking to solve this problem.
"""
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """
    >>> generate_all_combinations(n=4, k=2)
    [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
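
# Worked example: generate_all_combinations(4, 2) expands the search tree
# 1 -> {2, 3, 4}, 2 -> {3, 4}, 3 -> {4} and returns
# [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]], i.e. C(4, 2) = 6 results;
# the backtracking does O(C(n, k) * k) total work.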
"""Tests for the Longformer tokenizer (slow and fast), adapted from the RoBERTa tokenizer tests."""
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embedded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
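
# Running sketch (paths assume a transformers source checkout; adjust to your layout):
#   python -m pytest tests/models/longformer/test_tokenization_longformer.py -k space_encoding
# Tests marked @slow additionally need network access to download
# `allenai/longformer-base-4096`.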
"""Doc-example tests: run the doctests embedded in transformers source files and documentation."""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """
        Run through `directory`, collect the files matched by `identifier` (and not matched by
        `n_identifier`), and execute the doctests they contain.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
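
# Note: each test_* method above only points `analyze_directory` at a slice of
# the tree -- `identifier` keeps matching files, `n_identifier` excludes them,
# and `only_modules=False` falls back to `doctest.testfile` for sources that
# are not importable as transformers attributes (e.g. the docs).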
"""ResNet model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for ResNet models and backbones."""

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
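
# Illustrative usage sketch: instantiating a randomly initialized
# ResNet-50-shaped backbone from this config (`ResNetModel` is the matching
# modeling class in transformers, shown only for illustration):
#
#   from transformers import ResNetModel
#   config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck")
#   model = ResNetModel(config)  # random weights, ResNet-50 layout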
"""Draw the Sierpinski triangle with turtle graphics, to a given recursion depth."""
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Draw the triangle given by the three vertices, then recurse on the three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
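
# Note: every call draws one triangle and recurses three times, so a run at
# depth d draws 3**0 + 3**1 + ... + 3**d = (3 ** (d + 1) - 1) / 2 triangles in
# total; with turtle graphics, depths much beyond ~7 become very slow.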
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
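
# Note: as I understand diffusers' `deprecate` helper, this only emits a
# FutureWarning at import time; once the installed diffusers version reaches
# 0.22.0 the helper turns the expired deprecation into a hard error, at which
# point this shim module is meant to be deleted.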
"""Binarize a grayscale image around its global mean intensity."""
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """
    image: a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()

    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
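
# Note: this is global mean thresholding, the simplest binarization -- every
# pixel strictly above the image-wide mean becomes 255 (white) and the rest
# become 0 (black). Histogram-based methods such as Otsu's thresholding pick
# the cutoff adaptively instead of using the plain mean.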
"""Tests for TrainerCallback event dispatching in the transformers Trainer."""
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    """A callback that records the events that go through it."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
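
# Illustrative usage sketch: outside of tests a custom callback is attached the
# same way, e.g. `Trainer(model, args, callbacks=[MyTestTrainerCallback()])`;
# every `on_*` hook then receives `(args, state, control, **kwargs)` at the
# corresponding point of the training loop.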
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A_ : Tuple = logging.get_logger(__name__)
A_ : str = "▁"
A_ : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model"}
A_ : List[str] = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
A_ : int = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
A_ : Dict = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = ['''input_ids''', '''attention_mask''']
lowerCamelCase__ = []
lowerCamelCase__ = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : int = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
snake_case__ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
snake_case__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case__ : Any = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case__ : int = 1
snake_case__ : Tuple = len(self.sp_model )
snake_case__ : Optional[int] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE )
}
snake_case__ : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
snake_case__ : int = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
snake_case__ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case__ : Optional[int] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
snake_case__ : Optional[int] = src_lang if src_lang is not None else """en_XX"""
snake_case__ : Tuple = self.lang_code_to_id[self._src_lang]
snake_case__ : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
snake_case__ : int = self.__dict__.copy()
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case__ : Tuple = {}
snake_case__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __UpperCamelCase ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = [1] * len(self.prefix_tokens )
snake_case__ : Optional[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
snake_case__ : str = [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case__ : Union[str, Any] = src_lang
snake_case__ : Tuple = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ : Union[str, Any] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = """""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case__ : Tuple = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
snake_case__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
snake_case__ : str = src_lang
snake_case__ : Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = self.lang_code_to_id[src_lang]
snake_case__ : str = []
snake_case__ : Any = [self.eos_token_id, self.cur_lang_code]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = self.lang_code_to_id[lang]
snake_case__ : List[Any] = []
snake_case__ : Dict = [self.eos_token_id, self.cur_lang_code]
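A minimal standalone sketch of the id layout built above, with made-up sizes in place of the real sentencepiece model (the code list and sizes are illustrative assumptions, not checkpoint values):

FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "en_XX", "ro_RO"]  # stand-in for the real language-code list
sp_model_size = 10                                    # stand-in sentencepiece vocab size
fairseq_offset = 1                                    # fairseq reserves one extra id up front

lang_code_to_id = {
    code: sp_model_size + i + fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
}
id_to_lang_code = {v: k for k, v in lang_code_to_id.items()}
mask_token_id = sp_model_size + len(lang_code_to_id) + fairseq_offset  # appended after the codes

assert lang_code_to_id == {"ar_AR": 11, "en_XX": 12, "ro_RO": 13}
assert mask_token_id == 14  # hence vocab_size = sp_model_size + num_codes + offset + 1 = 15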
| 38 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
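The resize-and-normalize pipeline exercised by the three tests above can also be driven directly; a minimal usage sketch with the same 18x18 test size (assumes transformers and Pillow are installed):

from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(
    do_resize=True,
    size={"height": 18, "width": 18},
    do_normalize=True,
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
)
image = Image.new("RGB", (400, 300))  # any RGB image; the tests use random fixtures
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])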
| 38 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LEDTokenizer
lowerCamelCase__ = LEDTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
snake_case__ : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : int = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : List[Any] = {"""unk_token""": """<unk>"""}
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return "lower newer", "lower newer"
@cached_property
def __UpperCamelCase ( self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def __UpperCamelCase ( self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
snake_case__ : Optional[int] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : Any = tokenizer(__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE ) , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
snake_case__ : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : Dict = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertIn("""input_ids""" , __SCREAMING_SNAKE_CASE )
self.assertIn("""attention_mask""" , __SCREAMING_SNAKE_CASE )
self.assertNotIn("""labels""" , __SCREAMING_SNAKE_CASE )
self.assertNotIn("""decoder_attention_mask""" , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : Union[str, Any] = tokenizer(text_target=__SCREAMING_SNAKE_CASE , max_length=3_2 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
@require_torch
def __UpperCamelCase ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : List[str] = tokenizer(
["""I am a small frog""" * 1_0_2_4, """I am a small frog"""] , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : str = ["""A long paragraph for summarization."""]
snake_case__ : List[Any] = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : Any = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = tokenizer(text_target=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : List[Any] = inputs["""input_ids"""]
snake_case__ : int = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __UpperCamelCase ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case__ : Dict = ["""Summary of the text.""", """Another summary."""]
snake_case__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
snake_case__ : Optional[Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = [[0] * len(__SCREAMING_SNAKE_CASE ) for x in encoded_output["""input_ids"""]]
snake_case__ : Optional[int] = tokenizer.pad(__SCREAMING_SNAKE_CASE )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : str = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = """A, <mask> AllenNLP sentence."""
snake_case__ : List[str] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 38 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : List[Any] = patch_sizes
snake_case__ : Optional[int] = patch_stride
snake_case__ : Optional[Any] = patch_padding
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : Dict = num_labels
snake_case__ : Optional[Any] = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : Optional[int] = num_heads
snake_case__ : Optional[int] = stride_kv
snake_case__ : int = depth
snake_case__ : Optional[Any] = cls_token
snake_case__ : List[Any] = attention_drop_rate
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFCvtModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = (self.image_size, self.image_size)
snake_case__ , snake_case__ : str = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : str = TFCvtForImageClassification(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtModelTester(self )
snake_case__ : Any = TFCvtConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__SCREAMING_SNAKE_CASE )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.call )
            # signature.parameters is an OrderedDict, so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] = outputs.hidden_states
snake_case__ : Tuple = len(self.model_tester.depth )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFCvtModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def prepare_img() -> Image.Image:
    """Load the standard COCO test image used by the slow integration test below."""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : int = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
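The per-stage shape arithmetic in create_and_check_model above is the standard strided-convolution output formula; a small self-contained restatement using the tester's default stage parameters:

from math import floor

def conv_output_size(size: int, kernel: int, stride: int, padding: int) -> int:
    # floor((size + 2*padding - kernel) / stride + 1), as in create_and_check_model above
    return floor((size + 2 * padding - kernel) / stride + 1)

size = 64  # the tester's default image_size
for kernel, stride, padding in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = conv_output_size(size, kernel, stride, padding)
print(size)  # 4: the 64x64 input shrinks 64 -> 16 -> 8 -> 4 across the three stages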
| 38 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal( u : float , p : int ) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the coefficient product in Newton's forward formula."""
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main() -> None:
    """Read tabulated values and interpolate with Newton's forward-difference formula."""
    n = int(input("""enter the number of values: """ ) )
    y : list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(0 )
    print("""enter the values of x in a list: """ )
    x = list(map(int , input().split() ) )
    print("""enter the corresponding values of y: """ )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("""enter the value to interpolate: """ ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f"the value at {value} is {summ}" )
if __name__ == "__main__":
main()
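A non-interactive worked example of the same forward-difference scheme, with the input() calls replaced by hard-coded data (tabulated 2**x at x = 0..3, interpolated at 1.5):

import math

def ucal_demo(u: float, p: int) -> float:
    # same product as ucal above: u * (u - 1) * ... * (u - p + 1)
    temp = u
    for i in range(1, p):
        temp *= u - i
    return temp

x = [0, 1, 2, 3]
table = [[1.0, 0.0, 0.0, 0.0], [2.0, 0.0, 0.0, 0.0], [4.0, 0.0, 0.0, 0.0], [8.0, 0.0, 0.0, 0.0]]
n = 4
for i in range(1, n):
    for j in range(n - i):
        table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
u = (1.5 - x[0]) / (x[1] - x[0])
summ = table[0][0]
for i in range(1, n):
    summ += ucal_demo(u, i) * table[0][i] / math.factorial(i)
print(summ)  # 2.8125, close to the exact 2**1.5 ~ 2.8284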
| 38 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
        # For consistency across the different places where DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
snake_case__ : int = [[1, 2, 4], [1, 2, 3, 4]]
snake_case__ : Any = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self ):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
snake_case__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [[1, 2, 3], [1, 2, 4]]
snake_case__ : Optional[int] = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : Any = dc.update(1 )
snake_case__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : Tuple = dc.update(2 )
snake_case__ : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(3 )
snake_case__ : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
snake_case__ : int = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
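A hedged illustration of why the complete-subset case above must be rejected: in a token trie, a constraint that ends at a non-leaf node makes completion ambiguous. This standalone trie is illustrative only, not the library's internal representation:

def has_subset_conflict(nested_token_ids: list) -> bool:
    trie: dict = {}
    for seq in nested_token_ids:
        node = trie
        for tok in seq:
            node = node.setdefault(tok, {})
        node["__end__"] = True  # mark where a constraint terminates

    def walk(node: dict) -> bool:
        children = [key for key in node if key != "__end__"]
        # a constraint ending at a node that still has children is a strict prefix of another
        if "__end__" in node and children:
            return True
        return any(walk(node[child]) for child in children)

    return walk(trie)

assert has_subset_conflict([[1, 2], [1, 2, 3, 4]]) is True   # rejected above
assert has_subset_conflict([[1, 2, 3], [1, 2, 4]]) is False  # accepted above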
| 38 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1, 3_8_4, 2_4, 2_4] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : str = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : Optional[int] = patch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : str = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : str = backbone_out_indices
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : Optional[int] = num_labels
snake_case__ : str = backbone_featmap_shape
snake_case__ : List[Any] = scope
snake_case__ : Optional[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
snake_case__ : List[Any] = (image_size // patch_size) ** 2
snake_case__ : Union[str, Any] = num_patches + 1
def __UpperCamelCase ( self ):
snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : str = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
snake_case__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [9_6, 1_9_2, 3_8_4, 7_6_8],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = DPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : Dict = DPTForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = DPTModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict, so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = False
snake_case__ : str = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
snake_case__ : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case__ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case__ : List[str] = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = """add"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def prepare_img() -> "Image.Image":
    """Load the standard COCO test image used by the slow integration tests below."""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
snake_case__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = outputs.predicted_depth
# verify the predicted depth
snake_case__ : Any = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
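A small restatement of the sequence-length bookkeeping from DPTModelTester above (the 384/16 case assumes the Intel/dpt-hybrid-midas geometry used by the slow test):

def dpt_seq_length(image_size: int, patch_size: int) -> int:
    num_patches = (image_size // patch_size) ** 2  # ViT-style patch grid
    return num_patches + 1                         # plus one for the [CLS] token

assert dpt_seq_length(32, 16) == 5     # the tester defaults above
assert dpt_seq_length(384, 16) == 577  # assumed geometry of the 384x384 slow test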
| 38 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
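Upstream these two classes are SegformerConfig and SegformerOnnxConfig; a brief usage sketch against the defaults declared in __init__ above:

from transformers import SegformerConfig

config = SegformerConfig()  # defaults: 4 encoder blocks, depths [2, 2, 2, 2]
assert config.num_encoder_blocks == len(config.depths) == len(config.hidden_sizes) == 4
assert config.decoder_hidden_size == 2_5_6  # matches the default declared above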
| 38 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ : str = 250004
A_ : str = 250020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = MBartTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case__ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case__ : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : int = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : Tuple = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case__ : Any = tempfile.mkdtemp()
snake_case__ : Optional[int] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : List[Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case__ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = '''facebook/mbart-large-en-ro'''
lowerCamelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase__ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
snake_case__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
snake_case__ : Any = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
snake_case__ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case__ : List[Any] = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : int = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
snake_case__ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = MBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
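# Hedged standalone sketch (not part of the test class above): the en_XX ->
# ro_RO setup these tests exercise. Requires torch plus network access to the
# "facebook/mbart-large-en-ro" checkpoint named above.
from transformers import MBartTokenizer

_tok = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
_batch = _tok(
    " UN Chief Says There Is No Military Solution in Syria",
    text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
    return_tensors="pt",
)
# Source ids end with [eos, en_XX]: the language code is appended as a suffix.
print(_batch.input_ids[0][-2:])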
| 38 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
    snake_case__ : str = __magic_name__.split("""::""" )[0]
    if snake_case__.startswith("""tests/models/""" ):
        snake_case__ : Tuple = snake_case__.split("""/""" )[2]
    else:
        snake_case__ : Any = None
    return snake_case__
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
A_ : Any = make_github_table(reduced_by_error)
A_ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
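# Hedged usage note: this script is driven entirely by the argparse block
# above. A typical invocation looks like the following; the script filename,
# run id, output path, and token are placeholders, not real values.
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 123456789 \
#       --output_dir ci_errors \
#       --token "$GITHUB_TOKEN"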
| 38 | 1 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = tf.convert_to_tensor(__magic_name__ )
snake_case__ : Tuple = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def UpperCamelCase__ ( __magic_name__ : Any ) -> Tuple:
'''simple docstring'''
snake_case__ : Tuple = tf.convert_to_tensor(__magic_name__ )
snake_case__ : Any = tf.cast(math.pi , x.dtype )
snake_case__ : Any = tf.cast(0.04_4715 , x.dtype )
snake_case__ : Dict = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__magic_name__ , 3 )) ))
return x * cdf
def UpperCamelCase__ ( __magic_name__ : Tuple ) -> int:
'''simple docstring'''
snake_case__ : Dict = tf.convert_to_tensor(__magic_name__ )
return x * tf.tanh(tf.math.softplus(__magic_name__ ) )
def UpperCamelCase__ ( __magic_name__ : Union[str, Any] ) -> int:
'''simple docstring'''
snake_case__ : Optional[Any] = tf.convert_to_tensor(__magic_name__ )
snake_case__ : int = tf.cast(0.04_4715 , x.dtype )
snake_case__ : Optional[int] = tf.cast(0.79_7884_5608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : List[Any] = tf.convert_to_tensor(__magic_name__ )
snake_case__ : List[Any] = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def UpperCamelCase__ ( __magic_name__ : Tuple ) -> List[Any]:
'''simple docstring'''
return tf.clip_by_value(_gelu(__magic_name__ ) , -10 , 10 )
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : str=-1 ) -> Tuple:
'''simple docstring'''
snake_case__ , snake_case__ : List[str] = tf.split(__magic_name__ , 2 , axis=__magic_name__ )
return a * tf.math.sigmoid(__magic_name__ )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
        return tf.keras.activations.gelu(__magic_name__ , approximate=True )
A_ : Optional[Any] = tf.keras.activations.gelu
A_ : List[str] = approximate_gelu_wrap
else:
A_ : Any = _gelu
A_ : Optional[int] = _gelu_new
A_ : int = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def UpperCamelCase__ ( __magic_name__ : Tuple ) -> List[Any]:
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 38 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[Any] = 0
snake_case__ : Union[str, Any] = dataset_name
snake_case__ : Optional[int] = cache_dir
snake_case__ : Union[str, Any] = use_local_dummy_data
snake_case__ : int = config
# download_callbacks take a single url as input
snake_case__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
snake_case__ : List[str] = None
snake_case__ : List[str] = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
        # return the full path if it's a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
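# Self-contained sketch of the key naming rule the dummy-data helpers above
# apply: each URL is mapped to <dummy_path>/<url-quoted basename>. The
# dummy_root default below is illustrative only.
import urllib.parse as _parse
from pathlib import Path as _Path

def _dummy_path_for(url: str, dummy_root: str = "dummy/1.0.0/dummy_data") -> str:
    return os.path.join(dummy_root, _parse.quote_plus(_Path(url).name))

print(_dummy_path_for("https://example.com/data/train.json?rev=2"))
# -> dummy/1.0.0/dummy_data/train.json%3Frev%3D2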
| 38 | 1 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
A_ : str = None
try:
import msvcrt
except ImportError:
A_ : Optional[int] = None
try:
import fcntl
except ImportError:
A_ : List[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
A_ : int = OSError
# Data
# ------------------------------------------------
A_ : List[str] = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
A_ : Optional[Any] = "3.0.12"
A_ : int = None
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
global _logger
snake_case__ : Any = _logger or logging.getLogger(__name__ )
return _logger
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = lock_file
return None
def __str__( self ):
snake_case__ : List[str] = f"The file lock '{self.lock_file}' could not be acquired."
return temp
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = lock
return None
def __enter__( self ):
return self.lock
def __exit__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.lock.release()
return None
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=-1 , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Union[str, Any] = max_filename_length if max_filename_length is not None else 2_5_5
# Hash the filename if it's too long
snake_case__ : Any = self.hash_filename_if_too_long(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# The path to the lock file.
snake_case__ : List[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
snake_case__ : Any = None
# The default timeout value.
snake_case__ : List[Any] = timeout
# We use this lock primarily for the lock counter.
snake_case__ : int = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
snake_case__ : Union[str, Any] = 0
return None
@property
def __UpperCamelCase ( self ):
return self._lock_file
@property
def __UpperCamelCase ( self ):
return self._timeout
@timeout.setter
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = float(__SCREAMING_SNAKE_CASE )
return None
def __UpperCamelCase ( self ):
raise NotImplementedError()
def __UpperCamelCase ( self ):
raise NotImplementedError()
@property
def __UpperCamelCase ( self ):
return self._lock_file_fd is not None
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
snake_case__ : List[Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
snake_case__ : int = id(self )
snake_case__ : int = self._lock_file
snake_case__ : Optional[Any] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(f"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__SCREAMING_SNAKE_CASE )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
snake_case__ : int = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
snake_case__ : Any = id(self )
snake_case__ : List[str] = self._lock_file
logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
snake_case__ : Dict = 0
logger().debug(f"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self ):
self.acquire()
return self
def __exit__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.release()
return None
def __del__( self ):
self.release(force=__SCREAMING_SNAKE_CASE )
return None
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = os.path.basename(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > max_length and max_length > 0:
snake_case__ : Optional[Any] = os.path.dirname(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = str(hash(__SCREAMING_SNAKE_CASE ) )
snake_case__ : str = filename[: max_length - len(__SCREAMING_SNAKE_CASE ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return path
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=-1 , __SCREAMING_SNAKE_CASE=None ):
from .file_utils import relative_to_absolute_path
super().__init__(__SCREAMING_SNAKE_CASE , timeout=__SCREAMING_SNAKE_CASE , max_filename_length=__SCREAMING_SNAKE_CASE )
snake_case__ : str = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
snake_case__ : Dict = os.open(self._lock_file , __SCREAMING_SNAKE_CASE )
except OSError:
pass
else:
try:
msvcrt.locking(__SCREAMING_SNAKE_CASE , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Any = fd
return None
def __UpperCamelCase ( self ):
snake_case__ : Dict = self._lock_file_fd
snake_case__ : Dict = None
msvcrt.locking(__SCREAMING_SNAKE_CASE , msvcrt.LK_UNLCK , 1 )
os.close(__SCREAMING_SNAKE_CASE )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=-1 , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : List[Any] = os.statvfs(os.path.dirname(__SCREAMING_SNAKE_CASE ) ).f_namemax
super().__init__(__SCREAMING_SNAKE_CASE , timeout=__SCREAMING_SNAKE_CASE , max_filename_length=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = os.O_RDWR | os.O_CREAT | os.O_TRUNC
snake_case__ : Dict = os.open(self._lock_file , __SCREAMING_SNAKE_CASE )
try:
fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = fd
return None
def __UpperCamelCase ( self ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
snake_case__ : List[str] = self._lock_file_fd
snake_case__ : int = None
fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN )
os.close(__SCREAMING_SNAKE_CASE )
return None
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
snake_case__ : Union[str, Any] = os.open(self._lock_file , __SCREAMING_SNAKE_CASE )
except OSError:
pass
else:
snake_case__ : Optional[int] = fd
return None
def __UpperCamelCase ( self ):
os.close(self._lock_file_fd )
snake_case__ : Tuple = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
A_ : Optional[int] = None
if msvcrt:
A_ : Union[str, Any] = WindowsFileLock
elif fcntl:
A_ : List[str] = UnixFileLock
else:
A_ : List[Any] = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 38 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
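# Hedged note: like the other pipeline suites, this class is collected via
# pytest, e.g. `pytest -k IFImgaImgSuperResolution <path-to-this-file>`;
# the -k filter simply matches the pipeline class name above.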
| 38 | 1 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
A_ : Dict = get_tests_dir("fixtures")
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# A mock response for an HTTP head request to emulate server down
snake_case__ : Optional[int] = mock.Mock()
snake_case__ : Dict = 5_0_0
snake_case__ : List[Any] = {}
snake_case__ : Tuple = HTTPError
snake_case__ : Optional[int] = {}
# Download this model to make sure it's in the cache.
snake_case__ : int = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__SCREAMING_SNAKE_CASE ) as mock_head:
snake_case__ : str = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def __UpperCamelCase ( self ):
# This test is for deprecated behavior and can be removed in v5
snake_case__ : Union[str, Any] = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def __UpperCamelCase ( self ):
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case__ : Optional[int] = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
snake_case__ : str = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCamelCase ( cls ):
snake_case__ : int = TOKEN
HfFolder.save_token(__SCREAMING_SNAKE_CASE )
@classmethod
def __UpperCamelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = ViTImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case__ : List[str] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id="""test-image-processor""" , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token )
snake_case__ : Optional[int] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self ):
snake_case__ : int = ViTImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case__ : Union[str, Any] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token )
snake_case__ : Optional[Any] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self ):
CustomImageProcessor.register_for_auto_class()
snake_case__ : List[str] = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case__ : int = AutoImageProcessor.from_pretrained(
f"{USER}/test-dynamic-image-processor" , trust_remote_code=__SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
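# Standalone sketch of the push/pull round trip the tests above exercise.
# "my-user" and the token are placeholders; requires hub write access.
from transformers import ViTImageProcessor as _ViTImageProcessor

_proc = _ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
_proc.push_to_hub("test-image-processor", use_auth_token="hf_xxx")  # placeholder token
_reloaded = _ViTImageProcessor.from_pretrained("my-user/test-image-processor")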
| 38 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Dict = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None , __magic_name__ : List[str]=None ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = True
while ask_again:
snake_case__ : Optional[Any] = input(__magic_name__ )
try:
if default is not None and len(__magic_name__ ) == 0:
return default
return convert_value(__magic_name__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Any=[] , __magic_name__ : Optional[int]=None , __magic_name__ : int=0 ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = BulletMenu(__magic_name__ , __magic_name__ )
snake_case__ : Optional[Any] = menu.run(default_choice=__magic_name__ )
return convert_value(__magic_name__ ) if convert_value is not None else result
def UpperCamelCase__ ( __magic_name__ : Any ) -> int:
'''simple docstring'''
snake_case__ : Tuple = int(__magic_name__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase__ ( __magic_name__ : str ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = int(__magic_name__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = int(__magic_name__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Tuple:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __snake_case ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = super()._format_usage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = usage.replace("""<command> [<args>] """ , """""" )
return usage
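# Quick illustration of the index-based converters above: a menu selection is
# an integer indexing a fixed table, e.g. the dynamo-backend names bound to
# A_ at the top of this file. Index 2 selects "INDUCTOR".
_choice = 2
print(A_[_choice])  # -> "INDUCTOR"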
| 38 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = 42
# setable values
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = None
@classmethod
def __UpperCamelCase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return cls(common=__SCREAMING_SNAKE_CASE , init_noise_sigma=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
@dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCamelCase__ = 42
@property
def __UpperCamelCase ( self ):
return True
@register_to_config
def __init__( self , __SCREAMING_SNAKE_CASE = 1_0_0_0 , __SCREAMING_SNAKE_CASE = 0.0001 , __SCREAMING_SNAKE_CASE = 0.02 , __SCREAMING_SNAKE_CASE = "linear" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "fixed_small" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "epsilon" , __SCREAMING_SNAKE_CASE = jnp.floataa , ):
snake_case__ : str = dtype
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None ):
if common is None:
snake_case__ : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
snake_case__ : int = jnp.array(1.0 , dtype=self.dtype )
snake_case__ : int = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__SCREAMING_SNAKE_CASE , init_noise_sigma=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
return sample
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = () ):
snake_case__ : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
snake_case__ : Union[str, Any] = (jnp.arange(0 , __SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Tuple = state.common.alphas_cumprod[t]
snake_case__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
snake_case__ : Tuple = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
snake_case__ : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
snake_case__ : Any = jnp.clip(__SCREAMING_SNAKE_CASE , a_min=1e-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
snake_case__ : Union[str, Any] = jnp.log(jnp.clip(__SCREAMING_SNAKE_CASE , a_min=1e-2_0 ) )
elif variance_type == "fixed_large":
snake_case__ : Any = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
snake_case__ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
snake_case__ : Optional[int] = variance
snake_case__ : Optional[int] = state.common.betas[t]
snake_case__ : Optional[Any] = (predicted_variance + 1) / 2
snake_case__ : Any = frac * max_log + (1 - frac) * min_log
return variance
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , ):
snake_case__ : List[str] = timestep
if key is None:
snake_case__ : str = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
snake_case__ , snake_case__ : Union[str, Any] = jnp.split(__SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
snake_case__ : List[Any] = None
# 1. compute alphas, betas
snake_case__ : Optional[int] = state.common.alphas_cumprod[t]
snake_case__ : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
snake_case__ : Any = 1 - alpha_prod_t
snake_case__ : Optional[Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
snake_case__ : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
snake_case__ : str = model_output
elif self.config.prediction_type == "v_prediction":
snake_case__ : str = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
snake_case__ : Optional[Any] = jnp.clip(__SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case__ : Optional[int] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
snake_case__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case__ : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
snake_case__ : Tuple = jax.random.split(__SCREAMING_SNAKE_CASE , num=1 )
snake_case__ : Dict = jax.random.normal(__SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , predicted_variance=__SCREAMING_SNAKE_CASE ) ** 0.5) * noise
snake_case__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
snake_case__ : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
return add_noise_common(state.common , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
return get_velocity_common(state.common , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __len__( self ):
return self.config.num_train_timesteps
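# Self-contained check of the "fixed_small" variance the scheduler above
# computes: var_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t,
# floored at 1e-20. Only assumes jax is installed; the linear beta schedule
# matches the config defaults above.
_betas = jnp.linspace(1e-4, 2e-2, 1000)
_alphas_cumprod = jnp.cumprod(1.0 - _betas)
_t = 10
_var = (1 - _alphas_cumprod[_t - 1]) / (1 - _alphas_cumprod[_t]) * _betas[_t]
print(jnp.clip(_var, 1e-20))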
| 38 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( __magic_name__ : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(__magic_name__ ) / len(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
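# Quick self-checks for the mean helper above (uses the obfuscated name
# UpperCamelCase__ exactly as defined; runs at import time):
assert UpperCamelCase__([3.0, 6.0, 9.0]) == 6.0
try:
    UpperCamelCase__([])
except ValueError as err:
    assert str(err) == "List is empty"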
| 38 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''resnet'''
lowerCamelCase__ = ['''basic''', '''bottleneck''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="bottleneck" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
snake_case__ : List[Any] = num_channels
snake_case__ : str = embedding_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : Dict = depths
snake_case__ : List[Any] = layer_type
snake_case__ : int = hidden_act
snake_case__ : Union[str, Any] = downsample_in_first_stage
snake_case__ : Dict = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-3
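# Hedged sketch via the upstream class this file mirrors
# (transformers.ResNetConfig, per the microsoft/resnet-50 map above); the
# field names come straight from the __init__ signature above.
from transformers import ResNetConfig as _ResNetConfig

_cfg = _ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic", out_features=["stage4"])
print(_cfg.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']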
| 38 |
'''simple docstring'''
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
super().__init__()
snake_case__ : list[str] = []
snake_case__ : List[Any] = domain
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined, is not empty, and is not just "#", process it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
snake_case__ : str = parse.urljoin(self.domain , __SCREAMING_SNAKE_CASE )
self.urls.append(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return ".".join(get_sub_domain_name(__magic_name__ ).split(""".""" )[-2:] )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return parse.urlparse(__magic_name__ ).netloc
def UpperCamelCase__ ( __magic_name__ : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
snake_case__ : List[str] = get_domain_name(__magic_name__ )
# Initialize the parser
snake_case__ : Optional[Any] = Parser(__magic_name__ )
try:
# Open URL
snake_case__ : Any = requests.get(__magic_name__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ : List[str] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case__ : Tuple = requests.get(__magic_name__ )
# Get the valid email.
snake_case__ : List[str] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__magic_name__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__magic_name__ )
if __name__ == "__main__":
A_ : str = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
| 38 | 1 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_0_0 , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1_0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , ):
snake_case__ : Tuple = parent
snake_case__ : Optional[Any] = 1_0_0
snake_case__ : List[Any] = batch_size
snake_case__ : List[str] = image_size
snake_case__ : int = patch_size
snake_case__ : Tuple = num_channels
snake_case__ : List[str] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : int = hidden_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : Optional[Any] = type_sequence_label_size
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Union[str, Any] = scope
snake_case__ : Any = out_indices
snake_case__ : str = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ : Optional[Any] = (image_size // patch_size) ** 2
snake_case__ : str = num_patches + 1
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : int = None
snake_case__ : List[Any] = None
if self.use_labels:
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCamelCase ( self ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = BeitModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[int] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = BeitForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = self.type_sequence_label_size
snake_case__ : Any = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Any = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ : Any = 1
snake_case__ : Any = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = self.num_labels
snake_case__ : str = BeitForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : int = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
snake_case__ : Any = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = config_and_inputs
snake_case__ : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[str] = BeitModelTester(self )
snake_case__ : List[Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : int = [*signature.parameters.keys()]
snake_case__ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
if not self.model_tester.is_training:
return
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]:
continue
snake_case__ : List[str] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : List[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case__ : Optional[int] = False
snake_case__ : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : str = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Dict = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def __UpperCamelCase ( self ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = BeitModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : Optional[int] = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values.to(__SCREAMING_SNAKE_CASE )
# prepare bool_masked_pos
snake_case__ : str = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Optional[Any] = model(pixel_values=__SCREAMING_SNAKE_CASE , bool_masked_pos=__SCREAMING_SNAKE_CASE )
snake_case__ : int = outputs.logits
# verify the logits
snake_case__ : Dict = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-2 ) )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = self.default_image_processor
snake_case__ : Tuple = prepare_img()
snake_case__ : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : str = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = outputs.logits
# verify the logits
snake_case__ : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
snake_case__ : List[str] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = outputs.logits
# verify the logits
snake_case__ : str = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
snake_case__ : Union[str, Any] = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
snake_case__ : List[Any] = model.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=6_4_0 , do_center_crop=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
snake_case__ : int = Image.open(ds[0]["""file"""] )
snake_case__ : Tuple = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : List[str] = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = outputs.logits
# verify the logits
snake_case__ : str = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
snake_case__ : List[str] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=__SCREAMING_SNAKE_CASE , )
else:
snake_case__ : Tuple = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=__SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
snake_case__ : str = model.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=6_4_0 , do_center_crop=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
snake_case__ : Optional[int] = Image.open(ds[0]["""file"""] )
snake_case__ : List[str] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : int = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = outputs.logits.detach().cpu()
snake_case__ : int = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(5_0_0, 3_0_0)] )
snake_case__ : Tuple = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
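# Usage sketch (hedged): the classes above follow the standard transformers
# test layout, so the suite would typically be run with pytest, e.g.
#     pytest tests/models/beit/test_modeling_beit.py
# The path is an assumption about where this file lives in the repository.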
| 38 |
'''simple docstring'''
def is_palindrome(head) -> bool:
    """O(1)-space check: split the list, reverse the second half, then compare."""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    """Stack-based check: push the second half, then pop while walking from the head."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    """Dictionary-based check: positions of each value must mirror around the middle."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True
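# Usage sketch (hedged): the checks above assume a singly linked node exposing
# `.val` and `.next`; no such class exists in this file, so `Node` below is a
# hypothetical stand-in added purely for demonstration.
class Node:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt


if __name__ == "__main__":
    # 1 -> 2 -> 2 -> 1 reads the same both ways; 1 -> 2 does not.
    assert is_palindrome(Node(1, Node(2, Node(2, Node(1))))) is True
    assert is_palindrome_stack(Node(1, Node(2, Node(2, Node(1))))) is True
    assert is_palindrome_dict(Node(1, Node(2, Node(1)))) is True
    assert is_palindrome_stack(Node(1, Node(2))) is False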
| 38 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : str = tempfile.mkdtemp()
# fmt: off
snake_case__ : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
snake_case__ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Union[str, Any] = {
"""do_resize""": True,
"""size""": 2_0,
"""do_center_crop""": True,
"""crop_size""": 1_8,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
snake_case__ : Tuple = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
snake_case__ : List[Any] = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.get_tokenizer()
snake_case__ : Any = self.get_rust_tokenizer()
snake_case__ : Dict = self.get_image_processor()
snake_case__ : int = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
snake_case__ : List[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
snake_case__ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case__ : Optional[int] = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
snake_case__ : List[str] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : str = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.prepare_image_inputs()
snake_case__ : Optional[Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
snake_case__ : Union[str, Any] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCamelCase ( self ):
snake_case__ : str = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : int = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : str = """lower newer"""
snake_case__ : List[Any] = processor(text=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.get_image_processor()
snake_case__ : Union[str, Any] = self.get_tokenizer()
snake_case__ : Union[str, Any] = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """lower newer"""
snake_case__ : str = self.prepare_image_inputs()
snake_case__ : Optional[int] = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : List[str] = self.get_tokenizer()
snake_case__ : Dict = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : int = self.prepare_image_inputs()
snake_case__ : Optional[int] = self.prepare_image_inputs()
snake_case__ : Dict = processor(images=__SCREAMING_SNAKE_CASE , visual_prompt=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : int = CLIPSegProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : Tuple = processor.batch_decode(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 38 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ : str = 250004
A_ : str = 250020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = MBartTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case__ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case__ : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : int = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : Tuple = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case__ : Any = tempfile.mkdtemp()
snake_case__ : Optional[int] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : List[Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case__ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = '''facebook/mbart-large-en-ro'''
lowerCamelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase__ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
snake_case__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
snake_case__ : Any = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
snake_case__ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case__ : List[Any] = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : int = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
snake_case__ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = MBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
| 38 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : str = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
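# Usage sketch (hedged): a quick check of the config above; `attribute_map`
# lets the generic transformers names resolve to the GPT-1 specific ones.
if __name__ == "__main__":
    demo = OpenAIGPTConfig(n_layer=6)
    assert demo.num_hidden_layers == 6  # routed through attribute_map
    assert demo.hidden_size == demo.n_embd == 768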
| 38 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
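# Usage sketch (hedged): padding strategies are normalized in __init__, so a
# lower-case value is accepted and upper-cased; only this file's class is used.
if __name__ == "__main__":
    demo = BitConfig(global_padding="same")
    assert demo.global_padding == "SAME"
    print(demo.stage_names)  # ["stem", "stage1", "stage2", "stage3", "stage4"]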
| 38 | 1 |
'''simple docstring'''
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Count representations of needed_sum as a sum of distinct natural numbers raised to `power`."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """Validate the inputs and return the number of solutions."""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
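# Usage sketch (hedged): 13 = 2**2 + 3**2 is the only way to write 13 as a sum
# of distinct squares, and 10 = 1**2 + 3**2 the only way for 10.
assert solve(13, 2) == 1
assert solve(10, 2) == 1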
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict;
        # the target key names follow the standard transformers ViT layout
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm ViT-hybrid checkpoint into the transformers design."""
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 38 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : int ) -> list:
'''simple docstring'''
    if bit_count < 0:
        raise ValueError("""The given input must be non-negative""" )
# get the generated string sequence
snake_case__ : str = gray_code_sequence_string(__magic_name__ )
    # convert the generated strings to integers
for i in range(len(__magic_name__ ) ):
snake_case__ : Optional[int] = int(sequence[i] , 2 )
return sequence
def UpperCamelCase__ ( __magic_name__ : int ) -> list:
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
snake_case__ : str = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
snake_case__ : Optional[int] = gray_code_sequence_string(bit_count - 1 )
snake_case__ : List[Any] = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
snake_case__ : Tuple = """0""" + smaller_sequence[i]
sequence.append(__magic_name__ )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
snake_case__ : Tuple = """1""" + smaller_sequence[i]
sequence.append(__magic_name__ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
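    # Hedged usage sketch, assuming the un-obfuscated entry point is named
    # gray_code_sequence: with 2 bits the reflected code visits 0, 1, 3, 2,
    # so consecutive values differ in exactly one bit.
    print(gray_code_sequence(2))  # expected: [0, 1, 3, 2]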
| 38 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE=True , ):
super().__init__()
snake_case__ : str = layers_per_block
snake_case__ : int = torch.nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : List[Any] = None
snake_case__ : List[Any] = nn.ModuleList([] )
# down
snake_case__ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = output_channel
snake_case__ : Union[str, Any] = block_out_channels[i]
snake_case__ : int = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : str = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
snake_case__ : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# out
snake_case__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : str = 2 * out_channels if double_z else out_channels
snake_case__ : int = nn.Convad(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : Union[str, Any] = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = x
snake_case__ : int = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
snake_case__ : List[str] = down_block(__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
snake_case__ : Any = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
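# Hedged sketch of the gradient-checkpointing pattern used above: the wrapped
# module is re-run during the backward pass instead of caching its
# activations, trading compute for memory. The layer and shapes below are
# illustrative only.
def _demo_gradient_checkpointing() -> None:
    layer = nn.Linear(8, 8)
    x = torch.randn(2, 8, requires_grad=True)
    out = torch.utils.checkpoint.checkpoint(layer, x, use_reentrant=False)
    out.sum().backward()  # recomputes the layer's forward under the hood
    assert x.grad is not None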
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE="group" , ):
super().__init__()
snake_case__ : Any = layers_per_block
snake_case__ : Optional[Any] = nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : Optional[int] = in_channels if norm_type == """spatial""" else None
# mid
snake_case__ : Tuple = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# up
snake_case__ : List[Any] = list(reversed(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = output_channel
snake_case__ : Optional[Any] = reversed_block_out_channels[i]
snake_case__ : List[str] = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : int = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : int = output_channel
# out
if norm_type == "spatial":
snake_case__ : List[Any] = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
else:
snake_case__ : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : Union[str, Any] = nn.Convad(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : int = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Union[str, Any] = z
snake_case__ : Any = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
snake_case__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
snake_case__ : int = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : List[Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : Dict = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
snake_case__ : Optional[Any] = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="random" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ):
super().__init__()
snake_case__ : int = n_e
snake_case__ : Optional[int] = vq_embed_dim
snake_case__ : int = beta
snake_case__ : Optional[int] = legacy
snake_case__ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case__ : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
snake_case__ : Optional[Any] = self.used.shape[0]
snake_case__ : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case__ : Dict = self.re_embed
snake_case__ : List[str] = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
snake_case__ : Union[str, Any] = n_e
snake_case__ : str = sane_index_shape
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : Dict = inds.reshape(ishape[0] , -1 )
snake_case__ : Any = self.used.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
snake_case__ : List[Any] = match.argmax(-1 )
snake_case__ : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case__ : Optional[Any] = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : int = inds.reshape(ishape[0] , -1 )
snake_case__ : Optional[int] = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
snake_case__ : List[Any] = 0 # simply set to zero
snake_case__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
# reshape z -> (batch, height, width, channel) and flatten
snake_case__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case__ : Dict = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
snake_case__ : Union[str, Any] = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
snake_case__ : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
snake_case__ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case__ : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case__ : str = self.remap_to_used(__SCREAMING_SNAKE_CASE )
snake_case__ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case__ : Optional[int] = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case__ : int = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
snake_case__ : str = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
snake_case__ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
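# Hedged sketch of the core quantization step above: every flattened latent
# vector is assigned the index of its nearest codebook row via pairwise
# distances (torch.cdist + argmin), then replaced by that row. Sizes below
# are toy values.
def _demo_nearest_code_lookup() -> None:
    codebook = torch.randn(8, 4)  # 8 codes, embedding dim 4
    flat_z = torch.randn(5, 4)    # 5 latent vectors
    indices = torch.argmin(torch.cdist(flat_z, codebook), dim=1)
    quantized = codebook[indices]
    assert quantized.shape == flat_z.shape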
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
snake_case__ : Tuple = parameters
snake_case__ , snake_case__ : Any = torch.chunk(__SCREAMING_SNAKE_CASE , 2 , dim=1 )
snake_case__ : Union[str, Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
snake_case__ : Optional[int] = deterministic
snake_case__ : Optional[int] = torch.exp(0.5 * self.logvar )
snake_case__ : Any = torch.exp(self.logvar )
if self.deterministic:
snake_case__ : List[str] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None ):
# make sure sample is on the same device as the parameters and has same dtype
snake_case__ : Dict = randn_tensor(
self.mean.shape , generator=__SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
snake_case__ : Optional[int] = self.mean + self.std * sample
return x
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
snake_case__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.mean
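# Hedged sketch of the reparameterization trick used by the distribution
# above: a draw from N(mean, std) is written as mean + std * eps with
# eps ~ N(0, 1), which keeps mean and std differentiable. Shapes are
# illustrative only.
def _demo_reparameterization() -> None:
    mean = torch.zeros(2, 4)
    logvar = torch.zeros(2, 4)
    std = torch.exp(0.5 * logvar)
    sample = mean + std * torch.randn_like(mean)
    assert sample.shape == (2, 4)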
| 38 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : int ) -> bool:
'''simple docstring'''
    snake_case__ : Tuple = round(n ** (1 / 3))  # round: 27 ** (1 / 3) is 3.0000000000000004 in floats
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
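    print(perfect_cube(64))  # True once the cube root is rounded: 4 ** 3 == 64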
| 38 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1, 3_8_4, 2_4, 2_4] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : str = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : Optional[int] = patch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : str = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : str = backbone_out_indices
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : Optional[int] = num_labels
snake_case__ : str = backbone_featmap_shape
snake_case__ : List[Any] = scope
snake_case__ : Optional[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
snake_case__ : List[Any] = (image_size // patch_size) ** 2
snake_case__ : Union[str, Any] = num_patches + 1
def __UpperCamelCase ( self ):
snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : str = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
snake_case__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [9_6, 1_9_2, 3_8_4, 7_6_8],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = DPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : Dict = DPTForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
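# Hedged arithmetic check for the sequence length computed in the tester
# above: a 32x32 image with 16x16 patches yields (32 // 16) ** 2 = 4 patches,
# plus one [CLS] token, for 5 positions. Values mirror the tester defaults.
def _demo_patch_count() -> None:
    image_size, patch_size = 3_2, 1_6
    assert (image_size // patch_size) ** 2 + 1 == 5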
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = DPTModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = False
snake_case__ : str = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
snake_case__ : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case__ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case__ : List[str] = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = """add"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
snake_case__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = outputs.predicted_depth
# verify the predicted depth
snake_case__ : Any = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
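# Hedged sketch of the tolerance check used in the test above: allclose with
# atol=1e-4 accepts elementwise deviations up to roughly that magnitude.
# The tensors below are illustrative only.
def _demo_allclose_tolerance() -> None:
    a = torch.tensor([1.0000, 2.0000])
    b = torch.tensor([1.00005, 1.99995])
    assert torch.allclose(a, b, atol=1e-4)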
| 38 | 1 |
'''simple docstring'''
A_ : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 1000000,
"gigajoule": 1000000000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 3600000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4186800.00,
"electronvolt": 1.602176634e-19,
"britishthermalunit_it": 1055.05585,
"footpound": 1.355818,
}
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : str , __magic_name__ : float ) -> float:
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
snake_case__ : int = (
f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
f"Valid values are: {', '.join(__magic_name__ )}"
)
raise ValueError(__magic_name__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
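    # Hedged usage sketch, assuming the un-obfuscated entry point is
    # energy_conversion(from_type, to_type, value): 1 kilowatthour is
    # 3.6e6 J, i.e. 3600 kilojoules.
    print(energy_conversion("kilowatthour", "kilojoule", 1))  # 3600.0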
| 38 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case__ : int = botoa.client("""iam""" )
snake_case__ : Union[str, Any] = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__magic_name__ , AssumeRolePolicyDocument=json.dumps(__magic_name__ , indent=2 ) )
snake_case__ : Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__magic_name__ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(__magic_name__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def UpperCamelCase__ ( __magic_name__ : Any ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = botoa.client("""iam""" )
return iam_client.get_role(RoleName=__magic_name__ )["Role"]["Arn"]
def UpperCamelCase__ ( ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , __magic_name__ , )
snake_case__ : List[Any] = None
if credentials_configuration == 0:
snake_case__ : Dict = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
snake_case__ : List[str] = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
snake_case__ : List[str] = _ask_field("""AWS Access Key ID: """ )
snake_case__ : int = aws_access_key_id
snake_case__ : Optional[Any] = _ask_field("""AWS Secret Access Key: """ )
snake_case__ : List[str] = aws_secret_access_key
snake_case__ : Tuple = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
snake_case__ : Optional[int] = aws_region
snake_case__ : int = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , __magic_name__ , )
if role_management == 0:
snake_case__ : Optional[Any] = _ask_field("""Enter your IAM role name: """ )
else:
snake_case__ : Optional[int] = """accelerate_sagemaker_execution_role"""
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(__magic_name__ )
snake_case__ : Dict = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Any = None
if is_custom_docker_image:
snake_case__ : str = _ask_field("""Enter your Docker image: """ , lambda __magic_name__ : str(__magic_name__ ).lower() )
snake_case__ : Tuple = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : List[Any] = None
if is_sagemaker_inputs_enabled:
snake_case__ : str = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Optional[int] = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Optional[Any] = None
if is_sagemaker_metrics_enabled:
snake_case__ : List[Any] = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Tuple = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
snake_case__ : Any = {}
snake_case__ : List[Any] = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
snake_case__ : str = """dynamo_"""
snake_case__ : Tuple = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
snake_case__ : List[str] = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
snake_case__ : str = _ask_options(
"""Which mode do you want to use?""" , __magic_name__ , lambda __magic_name__ : TORCH_DYNAMO_MODES[int(__magic_name__ )] , default="""default""" , )
snake_case__ : Union[str, Any] = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : str = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Dict = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
snake_case__ : List[str] = _ask_options(
__magic_name__ , __magic_name__ , lambda __magic_name__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__magic_name__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
snake_case__ : Optional[int] = _ask_field(__magic_name__ , lambda __magic_name__ : str(__magic_name__ ).lower() , default="""ml.p3.2xlarge""" )
snake_case__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
snake_case__ : Optional[Any] = _ask_field(
"""How many machines do you want use? [1]: """ , __magic_name__ , default=1 , )
snake_case__ : Union[str, Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=__magic_name__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__magic_name__ , use_cpu=__magic_name__ , dynamo_config=__magic_name__ , eca_instance_type=__magic_name__ , profile=__magic_name__ , region=__magic_name__ , iam_role_name=__magic_name__ , mixed_precision=__magic_name__ , num_machines=__magic_name__ , sagemaker_inputs_file=__magic_name__ , sagemaker_metrics_file=__magic_name__ , )
| 38 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
A_ : List[str] = logging.getLogger(__name__)
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=-1 ):
# in NER datasets, the last column is usually reserved for NER label
snake_case__ : List[str] = label_idx
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = mode.value
snake_case__ : List[str] = os.path.join(__SCREAMING_SNAKE_CASE , f"{mode}.txt" )
snake_case__ : List[str] = 1
snake_case__ : str = []
with open(__SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as f:
snake_case__ : Any = []
snake_case__ : Optional[Any] = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) )
guid_index += 1
snake_case__ : Any = []
snake_case__ : int = []
else:
snake_case__ : Any = line.split(""" """ )
words.append(splits[0] )
if len(__SCREAMING_SNAKE_CASE ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) )
return examples
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(__SCREAMING_SNAKE_CASE )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
snake_case__ : Any = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(__SCREAMING_SNAKE_CASE )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if path:
with open(__SCREAMING_SNAKE_CASE , """r""" ) as f:
snake_case__ : List[Any] = f.read().splitlines()
if "O" not in labels:
snake_case__ : Any = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if path:
with open(__SCREAMING_SNAKE_CASE , """r""" ) as f:
snake_case__ : Optional[int] = f.read().splitlines()
if "O" not in labels:
snake_case__ : List[Any] = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = mode.value
snake_case__ : int = os.path.join(__SCREAMING_SNAKE_CASE , f"{mode}.txt" )
snake_case__ : Union[str, Any] = 1
snake_case__ : Any = []
with open(__SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as f:
for sentence in parse_incr(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = []
snake_case__ : Optional[Any] = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE )
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) )
guid_index += 1
return examples
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = 0
for sentence in parse_incr(__SCREAMING_SNAKE_CASE ):
snake_case__ : int = preds_list[example_id]
snake_case__ : Any = """"""
for token in sentence:
out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
out += "\n"
writer.write(__SCREAMING_SNAKE_CASE )
example_id += 1
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if path:
with open(__SCREAMING_SNAKE_CASE , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
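# Hedged sketch: conllu.parse_incr streams sentences from any file-like
# object, and each parsed token exposes the "form" and "upos" fields used by
# the POS task above. The one-token CoNLL-U row below is illustrative.
def _demo_parse_incr() -> None:
    import io

    data = "1\tHello\thello\tINTJ\t_\t_\t0\troot\t_\t_\n\n"
    for sentence in parse_incr(io.StringIO(data)):
        assert sentence[0]["form"] == "Hello"
        assert sentence[0]["upos"] == "INTJ"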
| 38 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase__ ( __magic_name__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
snake_case__ : Union[str, Any] = f"https://www.amazon.in/laptop/s?k={product}"
snake_case__ : List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
snake_case__ : int = BeautifulSoup(requests.get(__magic_name__ , headers=__magic_name__ ).text )
# Initialize a Pandas dataframe with the column titles
snake_case__ : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
snake_case__ : Optional[int] = item.ha.text
snake_case__ : Any = """https://www.amazon.in/""" + item.ha.a["""href"""]
snake_case__ : List[str] = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
snake_case__ : Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
snake_case__ : Optional[int] = """Not available"""
try:
snake_case__ : Tuple = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
snake_case__ : Optional[Any] = """"""
try:
snake_case__ : str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_00 )
except ValueError:
snake_case__ : List[Any] = float("""nan""" )
except AttributeError:
pass
snake_case__ : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case__ : List[Any] = """ """
snake_case__ : Union[str, Any] = """ """
data_frame.index += 1
return data_frame
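# Hedged arithmetic check of the discount formula used above:
# (mrp - price) / mrp * 100, so an item priced at 750 against an MRP of
# 1000 carries a 25% discount. The values are illustrative.
def _demo_discount_percentage() -> None:
    mrp, price = 1000.0, 750.0
    assert (mrp - price) / mrp * 1_00 == 25.0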
if __name__ == "__main__":
A_ : int = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 38 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class __snake_case ( nn.Module ):
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = (16, 32, 96, 256)
lowerCamelCase__ = jnp.floataa
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
snake_case__ : Tuple = []
for i in range(len(self.block_out_channels ) - 1 ):
snake_case__ : Optional[int] = self.block_out_channels[i]
snake_case__ : Optional[int] = self.block_out_channels[i + 1]
snake_case__ : Tuple = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : str = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = blocks
snake_case__ : Optional[Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = nn.silu(__SCREAMING_SNAKE_CASE )
for block in self.blocks:
snake_case__ : List[str] = block(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = nn.silu(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.conv_out(__SCREAMING_SNAKE_CASE )
return embedding
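# Hedged sketch: flax.linen's silu (applied between every conv above)
# computes x * sigmoid(x), so it maps zeros to zeros. The input shape below
# is illustrative only.
def _demo_silu() -> None:
    x = jnp.zeros((2, 4))
    assert jnp.allclose(nn.silu(x), jnp.zeros((2, 4)))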
@flax_register_to_config
class __snake_case ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 32
lowerCamelCase__ = 4
lowerCamelCase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCamelCase__ = False
lowerCamelCase__ = (320, 640, 1_280, 1_280)
lowerCamelCase__ = 2
lowerCamelCase__ = 8
lowerCamelCase__ = None
lowerCamelCase__ = 1_280
lowerCamelCase__ = 0.0
lowerCamelCase__ = False
lowerCamelCase__ = jnp.floataa
lowerCamelCase__ = True
lowerCamelCase__ = 0
lowerCamelCase__ = "rgb"
lowerCamelCase__ = (16, 32, 96, 256)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
# init input tensors
snake_case__ : Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
snake_case__ : Union[str, Any] = jnp.zeros(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
snake_case__ : str = jnp.ones((1,) , dtype=jnp.intaa )
snake_case__ : Any = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
snake_case__ : str = (1, 3, self.sample_size * 8, self.sample_size * 8)
snake_case__ : Any = jnp.zeros(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
snake_case__ , snake_case__ : Tuple = jax.random.split(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )["params"]
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.block_out_channels
snake_case__ : List[str] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case__ : str = self.num_attention_heads or self.attention_head_dim
# input
snake_case__ : List[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case__ : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case__ : Optional[int] = FlaxTimestepEmbedding(__SCREAMING_SNAKE_CASE , dtype=self.dtype )
snake_case__ : str = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
snake_case__ : Tuple = self.only_cross_attention
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case__ : Dict = []
snake_case__ : Tuple = []
snake_case__ : Optional[int] = block_out_channels[0]
snake_case__ : Tuple = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
for i, down_block_type in enumerate(self.down_block_types ):
snake_case__ : int = output_channel
snake_case__ : Optional[int] = block_out_channels[i]
snake_case__ : Optional[int] = i == len(__SCREAMING_SNAKE_CASE ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case__ : Tuple = FlaxCrossAttnDownBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
snake_case__ : Dict = FlaxDownBlockaD(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__SCREAMING_SNAKE_CASE )
for _ in range(self.layers_per_block ):
snake_case__ : Dict = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
if not is_final_block:
snake_case__ : int = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = down_blocks
snake_case__ : Union[str, Any] = controlnet_down_blocks
# mid
snake_case__ : int = block_out_channels[-1]
snake_case__ : Union[str, Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=__SCREAMING_SNAKE_CASE , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
snake_case__ : int = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1.0 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = False , ):
snake_case__ : Tuple = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
snake_case__ : List[Any] = jnp.flip(__SCREAMING_SNAKE_CASE , axis=1 )
# 1. time
if not isinstance(__SCREAMING_SNAKE_CASE , jnp.ndarray ):
snake_case__ : int = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__SCREAMING_SNAKE_CASE , jnp.ndarray ) and len(timesteps.shape ) == 0:
snake_case__ : str = timesteps.astype(dtype=jnp.floataa )
snake_case__ : Any = jnp.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
snake_case__ : Optional[int] = self.time_proj(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.time_embedding(__SCREAMING_SNAKE_CASE )
# 2. pre-process
snake_case__ : List[Any] = jnp.transpose(__SCREAMING_SNAKE_CASE , (0, 2, 3, 1) )
snake_case__ : List[Any] = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = jnp.transpose(__SCREAMING_SNAKE_CASE , (0, 2, 3, 1) )
snake_case__ : List[str] = self.controlnet_cond_embedding(__SCREAMING_SNAKE_CASE )
sample += controlnet_cond
# 3. down
snake_case__ : List[str] = (sample,)
for down_block in self.down_blocks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ , snake_case__ : Optional[Any] = down_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
else:
snake_case__ , snake_case__ : Optional[int] = down_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
snake_case__ : Union[str, Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , deterministic=not train )
        # 5. controlnet blocks
snake_case__ : Tuple = ()
for down_block_res_sample, controlnet_block in zip(__SCREAMING_SNAKE_CASE , self.controlnet_down_blocks ):
snake_case__ : Any = controlnet_block(__SCREAMING_SNAKE_CASE )
controlnet_down_block_res_samples += (down_block_res_sample,)
snake_case__ : List[str] = controlnet_down_block_res_samples
snake_case__ : int = self.controlnet_mid_block(__SCREAMING_SNAKE_CASE )
# 6. scaling
snake_case__ : List[str] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__SCREAMING_SNAKE_CASE , mid_block_res_sample=__SCREAMING_SNAKE_CASE )
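# --- Usage sketch (commented out). This relies on the public `diffusers` Flax
# API (`FlaxControlNetModel`, the upstream name of the module defined above);
# the checkpoint name is illustrative and the outputs mirror `__call__` above.
#
#   import jax.numpy as jnp
#   from diffusers import FlaxControlNetModel
#
#   controlnet, params = FlaxControlNetModel.from_pretrained(
#       "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
#   )
#   # The returned `down_block_res_samples` and `mid_block_res_sample` are then
#   # fed as residuals into the UNet of a Stable Diffusion pipeline.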
| 38 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LongformerTokenizer
lowerCamelCase__ = True
lowerCamelCase__ = LongformerTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Optional[int] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = """lower newer"""
snake_case__ : Dict = """lower newer"""
return input_text, output_text
def __UpperCamelCase ( self ):
snake_case__ : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = """lower newer"""
snake_case__ : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokens + [tokenizer.unk_token]
snake_case__ : List[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
snake_case__ : int = tokenizer.encode("""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = """Encode this sequence."""
snake_case__ : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
snake_case__ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case__ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : str = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : str = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __UpperCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""trim_offsets"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
        # Test that the offsets are correctly adapted to the arguments `add_prefix_space`
        # and `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : Any = f"{text_of_1_token} {text_of_1_token}"
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
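# --- Usage sketch (commented out). Mirrors what the tests above exercise, using
# the same public checkpoint referenced in this file:
#
#   from transformers import LongformerTokenizer
#
#   tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   ids = tok.encode("lower newer")        # wraps the ids in <s> ... </s>
#   tok.convert_ids_to_tokens(ids)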
| 38 | 1 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
A_ : Dict = logging.getLogger(__name__)
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''sequence-classification'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
if type(__SCREAMING_SNAKE_CASE ) == dict:
snake_case__ : Tuple = Namespace(**__SCREAMING_SNAKE_CASE )
snake_case__ : int = glue_output_modes[hparams.task]
snake_case__ : str = glue_tasks_num_labels[hparams.task]
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.mode )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return self.model(**__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
snake_case__ : Dict = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
snake_case__ : Optional[Any] = self(**__SCREAMING_SNAKE_CASE )
snake_case__ : str = outputs[0]
snake_case__ : List[str] = self.trainer.lr_schedulers[0]["""scheduler"""]
snake_case__ : Union[str, Any] = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __UpperCamelCase ( self ):
snake_case__ : Any = self.hparams
snake_case__ : Dict = processors[args.task]()
snake_case__ : Tuple = processor.get_labels()
for mode in ["train", "dev"]:
snake_case__ : List[Any] = self._feature_file(__SCREAMING_SNAKE_CASE )
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
snake_case__ : List[str] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
snake_case__ : Tuple = convert_examples_to_features(
__SCREAMING_SNAKE_CASE , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , __SCREAMING_SNAKE_CASE )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ):
snake_case__ : Tuple = """dev""" if mode == """test""" else mode
snake_case__ : Optional[Any] = self._feature_file(__SCREAMING_SNAKE_CASE )
logger.info("""Loading features from cached file %s""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = torch.load(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
snake_case__ : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
snake_case__ : int = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
snake_case__ : Dict = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
snake_case__ : str = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , batch_size=__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
snake_case__ : int = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
snake_case__ : Dict = self(**__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ : List[str] = outputs[:2]
snake_case__ : Optional[Any] = logits.detach().cpu().numpy()
snake_case__ : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
snake_case__ : List[Any] = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
snake_case__ : List[str] = np.argmax(__SCREAMING_SNAKE_CASE , axis=1 )
elif self.hparams.glue_output_mode == "regression":
snake_case__ : List[str] = np.squeeze(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
snake_case__ : Optional[Any] = [[] for _ in range(out_label_ids.shape[0] )]
snake_case__ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
snake_case__ : Tuple = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
snake_case__ : Tuple = dict(results.items() )
snake_case__ : List[Any] = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ , snake_case__ , snake_case__ : Optional[Any] = self._eval_end(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ , snake_case__ , snake_case__ : int = self._eval_end(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
parser.add_argument(
"""--max_seq_length""" , default=1_2_8 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
def UpperCamelCase__ ( ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Dict = argparse.ArgumentParser()
add_generic_args(__magic_name__ , os.getcwd() )
snake_case__ : List[str] = GLUETransformer.add_model_specific_args(__magic_name__ , os.getcwd() )
snake_case__ : Dict = parser.parse_args()
    # If output_dir is not provided, a folder will be generated in the current working directory
if args.output_dir is None:
snake_case__ : Union[str, Any] = os.path.join(
"""./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
os.makedirs(args.output_dir )
snake_case__ : Dict = GLUETransformer(__magic_name__ )
snake_case__ : Dict = generic_train(__magic_name__ , __magic_name__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
snake_case__ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=__magic_name__ ) )
snake_case__ : Any = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__magic_name__ )
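# Example invocation (commented out; the script name and flag values are
# illustrative, not taken from this file):
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --do_predict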
if __name__ == "__main__":
main()
| 38 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''resnet'''
lowerCamelCase__ = ['''basic''', '''bottleneck''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="bottleneck" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
snake_case__ : List[Any] = num_channels
snake_case__ : str = embedding_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : Dict = depths
snake_case__ : List[Any] = layer_type
snake_case__ : int = hidden_act
snake_case__ : Union[str, Any] = downsample_in_first_stage
snake_case__ : Dict = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-3
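# Minimal sketch (assumes the public `transformers` package, where the first
# class above is exposed as `ResNetConfig`; values mirror the defaults above):
if __name__ == "__main__":
    from transformers import ResNetConfig

    config = ResNetConfig(layer_type="""bottleneck""" , depths=[3, 4, 6, 3] )
    print(config.stage_names )  # ["stem", "stage1", "stage2", "stage3", "stage4"]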
| 38 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = ['''note_seq''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
requires_backends(self , ["""note_seq"""] )
@classmethod
def __UpperCamelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
requires_backends(cls , ["""note_seq"""] )
@classmethod
def __UpperCamelCase ( cls , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
requires_backends(cls , ["""note_seq"""] )
| 38 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 38 | 1 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : Any ) -> str:
'''simple docstring'''
assert isinstance(__magic_name__ , __magic_name__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int] ) -> str:
'''simple docstring'''
snake_case__ : Optional[Any] = tmp_path / """cache"""
snake_case__ : List[Any] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ : List[str] = TextDatasetReader(__magic_name__ , cache_dir=__magic_name__ , keep_in_memory=__magic_name__ ).read()
_check_text_dataset(__magic_name__ , __magic_name__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[int] = tmp_path / """cache"""
snake_case__ : List[Any] = {"""text""": """string"""}
snake_case__ : List[Any] = features.copy() if features else default_expected_features
snake_case__ : Union[str, Any] = (
Features({feature: Value(__magic_name__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ : Dict = TextDatasetReader(__magic_name__ , features=__magic_name__ , cache_dir=__magic_name__ ).read()
_check_text_dataset(__magic_name__ , __magic_name__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case__ : Tuple = tmp_path / """cache"""
snake_case__ : List[Any] = {"""text""": """string"""}
snake_case__ : List[str] = TextDatasetReader(__magic_name__ , cache_dir=__magic_name__ , split=__magic_name__ ).read()
_check_text_dataset(__magic_name__ , __magic_name__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : int , __magic_name__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
if issubclass(__magic_name__ , __magic_name__ ):
snake_case__ : Union[str, Any] = text_path
elif issubclass(__magic_name__ , __magic_name__ ):
snake_case__ : Optional[int] = [text_path]
snake_case__ : str = tmp_path / """cache"""
snake_case__ : Any = {"""text""": """string"""}
snake_case__ : int = TextDatasetReader(__magic_name__ , cache_dir=__magic_name__ ).read()
_check_text_dataset(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : Dict=("train",) ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__magic_name__ , __magic_name__ )
for split in splits:
snake_case__ : Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : Any ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = tmp_path / """cache"""
snake_case__ : str = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=__magic_name__ , keep_in_memory=__magic_name__ ).read()
_check_text_datasetdict(__magic_name__ , __magic_name__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def UpperCamelCase__ ( __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case__ : Dict = tmp_path / """cache"""
    # the text loader reads each line into a single "text" column, with dtype "string" by default
snake_case__ : Any = {"""text""": """string"""}
snake_case__ : Dict = features.copy() if features else default_expected_features
snake_case__ : Optional[int] = (
Features({feature: Value(__magic_name__ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ : str = TextDatasetReader({"""train""": text_path} , features=__magic_name__ , cache_dir=__magic_name__ ).read()
_check_text_datasetdict(__magic_name__ , __magic_name__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCamelCase__ ( __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
if split:
snake_case__ : Dict = {split: text_path}
else:
snake_case__ : Any = """train"""
snake_case__ : Union[str, Any] = {"""train""": text_path, """test""": text_path}
snake_case__ : Any = tmp_path / """cache"""
snake_case__ : Optional[Any] = {"""text""": """string"""}
snake_case__ : Dict = TextDatasetReader(__magic_name__ , cache_dir=__magic_name__ ).read()
_check_text_datasetdict(__magic_name__ , __magic_name__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
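# --- Usage sketch (commented out). `TextDatasetReader` is internal to `datasets`;
# the public equivalent of what these tests exercise is:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("text", data_files={"train": "my_file.txt"})  # path is illustrative
#   ds["train"].column_names  # -> ["text"]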
| 38 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ):
snake_case__ : str = []
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_init_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_evaluate""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_predict""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_save""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_log""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_prediction_step""" )
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Tuple = tempfile.mkdtemp()
def __UpperCamelCase ( self ):
shutil.rmtree(self.output_dir )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
snake_case__ : List[Any] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionModelConfig(a=__SCREAMING_SNAKE_CASE , b=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionPreTrainedModel(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = TrainingArguments(self.output_dir , disable_tqdm=__SCREAMING_SNAKE_CASE , report_to=[] , **__SCREAMING_SNAKE_CASE )
return Trainer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , train_dataset=__SCREAMING_SNAKE_CASE , eval_dataset=__SCREAMING_SNAKE_CASE , callbacks=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
# Order doesn't matter
snake_case__ : Tuple = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
snake_case__ : List[str] = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
for cba, cba in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , cba.__class__ )
elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(cba.__class__ , __SCREAMING_SNAKE_CASE )
else:
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = ["""on_init_end""", """on_train_begin"""]
snake_case__ : Union[str, Any] = 0
snake_case__ : Dict = len(trainer.get_eval_dataloader() )
snake_case__ : Any = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(__SCREAMING_SNAKE_CASE ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_trainer()
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# Callbacks passed at init are added to the default callbacks
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
snake_case__ : Optional[Any] = self.get_trainer(disable_tqdm=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ : int = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.get_trainer()
snake_case__ : List[str] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(cb.__class__ , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# We can also add, pop, or remove by instance
snake_case__ : List[Any] = self.get_trainer()
snake_case__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.get_trainer()
snake_case__ : Any = trainer.callback_handler.callbacks[0]
snake_case__ : Optional[Any] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
import warnings
        # XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
snake_case__ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# A bit of everything
snake_case__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
snake_case__ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case__ : List[str] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
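# --- Usage sketch (commented out). The add/pop/remove pattern tested above, on
# any configured `Trainer` (the `trainer` object is assumed to exist; the
# callback classes are imported at the top of this file):
#
#   cb = trainer.pop_callback(PrinterCallback)   # returns the removed instance
#   trainer.add_callback(cb)                     # instances work as well as classes
#   trainer.remove_callback(DefaultFlowCallback)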
| 38 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = (PNDMScheduler,)
lowerCamelCase__ = (('''num_inference_steps''', 50),)
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = dict(self.forward_default_kwargs )
snake_case__ : Union[str, Any] = kwargs.pop("""num_inference_steps""" , __SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.dummy_sample
snake_case__ : Any = 0.1 * sample
snake_case__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case__ : Dict = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
snake_case__ : str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
snake_case__ : Any = dummy_past_residuals[:]
snake_case__ : int = scheduler.step_prk(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
snake_case__ : str = new_scheduler.step_prk(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
snake_case__ : Tuple = scheduler.step_plms(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
snake_case__ : Optional[Any] = new_scheduler.step_plms(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = dict(self.forward_default_kwargs )
snake_case__ : str = kwargs.pop("""num_inference_steps""" , __SCREAMING_SNAKE_CASE )
snake_case__ : str = self.dummy_sample
snake_case__ : Optional[Any] = 0.1 * sample
snake_case__ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case__ : List[Any] = self.get_scheduler_config()
snake_case__ : str = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
snake_case__ : Dict = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
                # copy over dummy past residuals (must be after setting timesteps)
snake_case__ : List[str] = dummy_past_residuals[:]
snake_case__ : int = scheduler.step_prk(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
snake_case__ : Dict = new_scheduler.step_prk(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
snake_case__ : int = scheduler.step_plms(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
snake_case__ : int = new_scheduler.step_plms(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.scheduler_classes[0]
snake_case__ : int = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = scheduler_class(**__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = 1_0
snake_case__ : int = self.dummy_model()
snake_case__ : Any = self.dummy_sample_deter
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.prk_timesteps ):
snake_case__ : Dict = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = scheduler.step_prk(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = scheduler.step_plms(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __UpperCamelCase ( self ):
snake_case__ : str = dict(self.forward_default_kwargs )
snake_case__ : str = kwargs.pop("""num_inference_steps""" , __SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
snake_case__ : Optional[Any] = self.get_scheduler_config()
snake_case__ : str = scheduler_class(**__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.dummy_sample
snake_case__ : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , """set_timesteps""" ):
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , """set_timesteps""" ):
snake_case__ : int = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case__ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
snake_case__ : Any = dummy_past_residuals[:]
snake_case__ : Optional[Any] = scheduler.step_prk(__SCREAMING_SNAKE_CASE , 0 , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
snake_case__ : Optional[Any] = scheduler.step_prk(__SCREAMING_SNAKE_CASE , 1 , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
snake_case__ : Tuple = scheduler.step_plms(__SCREAMING_SNAKE_CASE , 0 , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
snake_case__ : Dict = scheduler.step_plms(__SCREAMING_SNAKE_CASE , 1 , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __UpperCamelCase ( self ):
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.scheduler_classes[0]
snake_case__ : Optional[Any] = self.get_scheduler_config(steps_offset=1 )
snake_case__ : Tuple = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def __UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
        # an earlier version of set_timesteps() raised an indexing error on the alphas when the number of inference steps was a power of 3
snake_case__ : List[str] = 2_7
for scheduler_class in self.scheduler_classes:
snake_case__ : str = self.dummy_sample
snake_case__ : str = 0.1 * sample
snake_case__ : int = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
            # before the power-of-3 fix this would error on the first step, so doing two steps is enough
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
snake_case__ : Union[str, Any] = scheduler.step_prk(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
def __UpperCamelCase ( self ):
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = self.scheduler_classes[0]
snake_case__ : Optional[int] = self.get_scheduler_config()
snake_case__ : str = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.full_loop()
snake_case__ : Optional[int] = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
snake_case__ : int = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.full_loop(prediction_type="""v_prediction""" )
snake_case__ : List[Any] = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
snake_case__ : List[str] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def __UpperCamelCase ( self ):
        # We specify a different beta schedule so that the first alpha is 0.99
snake_case__ : Optional[int] = self.full_loop(set_alpha_to_one=__SCREAMING_SNAKE_CASE , beta_start=0.01 )
snake_case__ : str = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Dict = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def __UpperCamelCase ( self ):
        # We specify a different beta schedule so that the first alpha is 0.99
snake_case__ : Dict = self.full_loop(set_alpha_to_one=__SCREAMING_SNAKE_CASE , beta_start=0.01 )
snake_case__ : List[Any] = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
snake_case__ : List[Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3
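if __name__ == "__main__":
    # Minimal sketch of the denoising loop these tests exercise, reusing the
    # imports at the top of this file; plain variable names are used here and
    # the "model" is a stand-in for a real UNet.
    scheduler = PNDMScheduler(num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" )
    scheduler.set_timesteps(1_0 )
    sample = torch.randn(1 , 3 , 8 , 8 )
    for t in scheduler.timesteps:
        model_output = 0.1 * sample  # stand-in for a UNet prediction
        sample = scheduler.step(model_output , t , sample ).prev_sample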
| 38 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
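# A small sketch (assuming only Pillow and NumPy) of what the shape assertions above
# verify: resize RGB inputs to a fixed {"height": H, "width": W}, scale to [0, 1],
# and stack the batch into (batch_size, num_channels, height, width). This mirrors
# the test expectations, not the actual DPTImageProcessor implementation.
import numpy as np
from PIL import Image

def to_pixel_values(images, height=18, width=18):
    batch = []
    for image in images:
        resized = image.resize((width, height))  # PIL expects (width, height)
        array = np.asarray(resized, dtype=np.float32) / 255.0
        batch.append(array.transpose(2, 0, 1))  # HWC -> CHW
    return np.stack(batch)  # (batch_size, C, H, W)

_demo_images = [
    Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8)) for _ in range(4)
]
assert to_pixel_values(_demo_images).shape == (4, 3, 18, 18)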
| 38 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
A_ : Optional[Any] = "\\n\n"
A_ : int = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
A_ : Any = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def __UpperCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1_6 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE=None ):
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, cuda or gpu."
if device == "gpu":
snake_case__ : int = """cuda"""
else:
snake_case__ : Any = """cuda""" if torch.cuda.is_available() else """cpu"""
snake_case__ : int = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = model.to(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
snake_case__ : List[Any] = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
snake_case__ : Dict = model.config.max_length - 1
else:
snake_case__ : List[str] = model.config.max_length
snake_case__ : str = tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , return_attention_mask=__SCREAMING_SNAKE_CASE , ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = encodings["""input_ids"""]
snake_case__ : Union[str, Any] = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
snake_case__ : int = []
snake_case__ : Optional[Any] = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) ):
snake_case__ : Optional[int] = min(start_index + batch_size , len(__SCREAMING_SNAKE_CASE ) )
snake_case__ : int = encoded_texts[start_index:end_index]
snake_case__ : Optional[Any] = attn_masks[start_index:end_index]
if add_start_token:
snake_case__ : Any = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
snake_case__ : Optional[Any] = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask] , dim=1 )
snake_case__ : Union[str, Any] = encoded_batch
with torch.no_grad():
snake_case__ : Any = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).logits
snake_case__ : Optional[Any] = out_logits[..., :-1, :].contiguous()
snake_case__ : Tuple = labels[..., 1:].contiguous()
snake_case__ : Tuple = attn_mask[..., 1:].contiguous()
snake_case__ : Optional[Any] = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , __SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
| 38 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : List[Any] = patch_sizes
snake_case__ : Optional[int] = patch_stride
snake_case__ : Optional[Any] = patch_padding
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : Dict = num_labels
snake_case__ : Optional[Any] = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : Optional[int] = num_heads
snake_case__ : Optional[int] = stride_kv
snake_case__ : int = depth
snake_case__ : Optional[Any] = cls_token
snake_case__ : List[Any] = attention_drop_rate
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFCvtModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = (self.image_size, self.image_size)
snake_case__ , snake_case__ : str = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : str = TFCvtForImageClassification(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtModelTester(self )
snake_case__ : Any = TFCvtConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__SCREAMING_SNAKE_CASE )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] = outputs.hidden_states
snake_case__ : Tuple = len(self.model_tester.depth )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFCvtModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : int = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
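# The height/width assertions in the tester above follow the standard convolution
# output-size formula. A quick check using the tester defaults (image size 64,
# patch sizes [7, 3, 3], strides [4, 2, 2], paddings [2, 1, 1]):
from math import floor

def conv_output_size(size, kernel, stride, padding):
    return floor((size + 2 * padding - kernel) / stride) + 1

_size = 64
for _kernel, _stride, _padding in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    _size = conv_output_size(_size, _kernel, _stride, _padding)
    print(_size)  # 16, then 8, then 4 after the three stages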
| 38 | 1 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[Any] = 0
snake_case__ : Union[str, Any] = dataset_name
snake_case__ : Optional[int] = cache_dir
snake_case__ : Union[str, Any] = use_local_dummy_data
snake_case__ : int = config
# download_callbacks take a single url as input
snake_case__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
snake_case__ : List[str] = None
snake_case__ : List[str] = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
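# A one-function sketch of the URL-to-dummy-filename mapping used throughout the
# manager above: take the last path component of the URL and percent-encode it with
# quote_plus so it stays a safe file name even when the URL carries query arguments.
import os
import urllib.parse

def dummy_path_for(path_to_dummy_data, url):
    return os.path.join(path_to_dummy_data, urllib.parse.quote_plus(url.split("/")[-1]))

print(dummy_path_for("dummy", "https://example.com/data/train.csv?rev=2"))
# -> dummy/train.csv%3Frev%3D2 (on POSIX; os.sep applies elsewhere)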
| 38 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
snake_case__ : int = [[1, 2, 4], [1, 2, 3, 4]]
snake_case__ : Any = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self ):
        # We can't have constraints that are complete subsets of one another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
snake_case__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [[1, 2, 3], [1, 2, 4]]
snake_case__ : Optional[int] = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : Any = dc.update(1 )
snake_case__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : Tuple = dc.update(2 )
snake_case__ : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(3 )
snake_case__ : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
snake_case__ : int = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
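# A toy reimplementation (my own, not the transformers one) of the update semantics
# the tests above exercise: extend the current sequence while it stays a prefix of
# some candidate, report completion when a candidate is fully matched, and reset on
# a diverging token. `remaining` is measured against the longest candidate.
class ToyDisjunctiveConstraint:
    def __init__(self, token_ids):
        self.token_ids = token_ids
        self.seqlen = max(len(seq) for seq in token_ids)
        self.current_seq = []
        self.completed = False

    def update(self, token_id):
        candidate = self.current_seq + [token_id]
        if any(seq[: len(candidate)] == candidate for seq in self.token_ids):
            self.current_seq = candidate
            self.completed = any(seq == candidate for seq in self.token_ids)
            return True, self.completed, False  # stepped, completed, reset
        self.reset()
        return False, False, True

    def reset(self):
        self.current_seq, self.completed = [], False

    def remaining(self):
        return 0 if self.completed else self.seqlen - len(self.current_seq)

_dc = ToyDisjunctiveConstraint([[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]])
for _token in (1, 2, 5):
    print(_dc.update(_token), _dc.current_seq, _dc.remaining())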
| 38 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def UpperCamelCase__ ( __magic_name__ : str ) -> Union[str, Any]:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__magic_name__ ):
return ext
raise Exception(
f"Unable to determine file format from file extension {path}. "
f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> int:
'''simple docstring'''
snake_case__ : Optional[Any] = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
snake_case__ : List[Any] = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
snake_case__ : Dict = PipelineDataFormat.from_str(
format=__magic_name__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__magic_name__ , __magic_name__ )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = nlp
snake_case__ : Union[str, Any] = reader
@staticmethod
def __UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=__SCREAMING_SNAKE_CASE , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=__SCREAMING_SNAKE_CASE , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=__SCREAMING_SNAKE_CASE , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=__SCREAMING_SNAKE_CASE , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=__SCREAMING_SNAKE_CASE , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=__SCREAMING_SNAKE_CASE , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=__SCREAMING_SNAKE_CASE , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=__SCREAMING_SNAKE_CASE , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[Any] = self._nlp, []
for entry in self._reader:
snake_case__ : Tuple = nlp(**__SCREAMING_SNAKE_CASE ) if self._reader.is_multi_columns else nlp(__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
outputs.append(__SCREAMING_SNAKE_CASE )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
snake_case__ : int = self._reader.save_binary(__SCREAMING_SNAKE_CASE )
logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}" )
else:
self._reader.save(__SCREAMING_SNAKE_CASE )
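# A hedged sketch of the extension-based format inference the command above relies
# on; SUPPORTED_FORMATS is modeled here as a plain list, whereas the real constant
# lives on PipelineDataFormat.
SUPPORTED_FORMATS = ["json", "csv", "pipe"]

def infer_format(path):
    if not path:
        return "pipe"  # no file path means reading/writing through a pipe
    for ext in SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise ValueError(f"Unable to determine file format from file extension {path}.")

assert infer_format("samples.csv") == "csv"
assert infer_format("") == "pipe"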
| 38 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
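# A small sketch of how the per-stage geometry in the configuration above composes:
# each encoder block downsamples by its stride, so with the defaults
# (strides [4, 2, 2, 2]) a 512x512 input yields feature maps of 128, 64, 32 and 16.
# Integer division is an approximation of the padded-convolution arithmetic.
def stage_resolutions(image_size, strides):
    sizes, current = [], image_size
    for stride in strides:
        current = current // stride
        sizes.append(current)
    return sizes

assert stage_resolutions(512, [4, 2, 2, 2]) == [128, 64, 32, 16]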
| 38 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
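# A stripped-down sketch (my own construction, far simpler than the real _LazyModule)
# of the lazy-import idea above: attribute access on the module triggers the actual
# import via importlib, so heavy optional backends are only imported when used.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        return getattr(importlib.import_module(module_name), attr)

_lazy_demo = TinyLazyModule("lazy_demo", {"json": ["dumps", "loads"]})
print(_lazy_demo.dumps({"lazy": True}))  # `json` is imported only on first access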
| 38 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)
    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    sa = make_github_table(reduced_by_error)
    sb = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sa)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sb)
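# A toy run of the reduction logic above on synthetic (error_line, error, test)
# triples, with no GitHub access: errors are counted with a Counter and sorted by
# frequency, mirroring reduce_by_error on a miniature scale.
from collections import Counter

_logs = [
    ("line 1", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_a"),
    ("line 2", "AssertionError", "tests/models/gpt2/test_modeling_gpt2.py::test_b"),
    ("line 3", "OSError", "tests/models/bert/test_modeling_bert.py::test_c"),
]
_counter = Counter(error for _, error, _ in _logs)
_reduced = {
    error: {"count": count, "failed_tests": [(t, line) for line, e, t in _logs if e == error]}
    for error, count in _counter.most_common()
}
for _error, _info in _reduced.items():
    print(_error, _info["count"])  # AssertionError 2, then OSError 1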
| 38 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : int ) -> int:
'''simple docstring'''
    if not isinstance(__magic_name__ , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        num_string = str(abs(__magic_name__ ) )
        num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int("""""".join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 38 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)
    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")
    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)
    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)
    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url
    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)
    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
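    # Illustrative mapping (hypothetical URLs): {"train": "https://host/train.csv?raw=1"}
    # becomes {"train": os.path.join(path_to_dummy_data, "train.csv%3Fraw%3D1")} -- each
    # value joins the dummy folder with the quote_plus-encoded final path segment, and
    # keys are appended to values when the encoded names would otherwise collide.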
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")
    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 38 | 1 |
'''simple docstring'''
def binary_xor(a: int, b: int) -> str:
    """Return the XOR of two non-negative ints as a '0b'-prefixed binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
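# Hand-checked illustrative case: 25 = 0b11001, 32 = 0b100000, 25 ^ 32 = 57.
def _example_binary_xor() -> None:
    assert binary_xor(25, 32) == "0b111001"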
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
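        # Both dummy tensors above are rebuilt from the same fixed seed, so repeated
        # calls produce identical inputs and the pipeline tests stay deterministic.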
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 38 | 1 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 38 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    """Prompt until the reply converts cleanly; fall back to `default` on empty input."""
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    """Render a bullet menu of `options` and convert the selected index."""
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from subcommand help messages."""
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 38 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Hill climbing with a cooling schedule that occasionally accepts worse moves."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
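# Acceptance rule sketch: a worsening move (change <= 0 after the find_max sign flip)
# is still taken with probability exp(change / current_temp), so early, hot iterations
# explore widely while late, cool iterations almost always reject downhill moves.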
if __name__ == "__main__":
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
A_ : Optional[int] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A_ : Optional[int] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
A_ : Optional[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A_ : Union[str, Any] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
F'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
return (3 * x**2) - (6 * y)
A_ : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A_ : Optional[int] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F'{local_min.score()}'
)
A_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A_ : Union[str, Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F'{local_min.score()}'
)
| 38 |
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of `nums`; raise on an empty list."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
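# Hand-checked illustrative cases:
def _example_mean() -> None:
    assert mean([3, 6, 9]) == 6.0
    assert mean([0.5, 1.5]) == 1.0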
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 | 1 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
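# Note: {"text": ...} is the minimal JSON payload Slack incoming webhooks accept;
# richer messages add "blocks" or "attachments" keys to the same POST body.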
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 38 |
'''simple docstring'''
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the second-level + top-level domain, e.g. 'github.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location, e.g. 'docs.github.com'."""
    return parse.urlparse(url).netloc
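# Illustrative: get_sub_domain_name("https://docs.github.com/x") -> "docs.github.com",
# and get_domain_name(...) keeps only the last two labels -> "github.com".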
def UpperCamelCase__ ( __magic_name__ : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
snake_case__ : List[str] = get_domain_name(__magic_name__ )
# Initialize the parser
snake_case__ : Optional[Any] = Parser(__magic_name__ )
try:
# Open URL
snake_case__ : Any = requests.get(__magic_name__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ : List[str] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case__ : Tuple = requests.get(__magic_name__ )
# Get the valid email.
snake_case__ : List[str] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__magic_name__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__magic_name__ )
if __name__ == "__main__":
A_ : str = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
| 38 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
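# Note: timm fuses query/key/value into a single (3*hidden_size, hidden_size) qkv
# projection; read_in_q_k_v above slices it back into the separate query/key/value
# weights and biases that the HuggingFace ViT layers expect.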
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 38 |
'''simple docstring'''
def is_palindrome(head) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
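# Minimal illustrative check. `_Node` is a throwaway stand-in: the functions above
# only assume nodes expose `val` and `next`. Fresh lists are built per call because
# is_palindrome() mutates its input.
class _Node:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt


def _example_is_palindrome() -> None:
    def make():
        return _Node(1, _Node(2, _Node(1)))  # 1 -> 2 -> 1

    assert is_palindrome(make())
    assert is_palindrome_stack(make())
    assert is_palindrome_dict(make())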
| 38 | 1 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
A_ : Any = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : str = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
A_ : int = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : List[str] = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
A_ : Union[str, Any] = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : int = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
A_ : Optional[int] = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Any = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
A_ : Tuple = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Any = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
A_ : Optional[int] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
A_ : Any = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
A_ : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
A_ : List[Any] = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
A_ : Tuple = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
A_ : str = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
A_ : List[str] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Dict = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
A_ : Any = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
A_ : str = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
A_ : Dict = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : List[str] = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
A_ : List[str] = ""
A_ : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
A_ : Tuple = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Tuple = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 38 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
snake_case__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
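        # Note: the ids above are the raw SentencePiece ids shifted by the tokenizer's
        # `fairseq_offset`; pieces missing from the tiny fixture vocab (here "9" and
        # "é") round-trip through ids as "<unk>".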
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : int = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : Tuple = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case__ : Any = tempfile.mkdtemp()
snake_case__ : Optional[int] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
                # Checks it saves with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : List[Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case__ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = '''facebook/mbart-large-en-ro'''
lowerCamelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase__ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
snake_case__ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case__ : List[Any] = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : int = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
snake_case__ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = MBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
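# A minimal usage sketch of the translation API exercised by the integration
# tests above. The checkpoint name is taken from the test class; running this
# downloads the tokenizer, which is assumed to be acceptable here.
from transformers import MBartTokenizer

mbart_tok = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
translation_batch = mbart_tok(
    ["UN Chief Says There Is No Military Solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    padding=True,
    return_tensors="pt",
)
# Source ids end with [eos, en_XX] and labels end with [eos, ro_RO], which is
# exactly what the fairseq-batch assertions above check.
print(translation_batch.input_ids[0][-2:].tolist(), translation_batch.labels[0][-2:].tolist())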
| 38 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_0 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 1_8}
snake_case__ : Tuple = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : Dict = parent
snake_case__ : str = batch_size
snake_case__ : str = num_channels
snake_case__ : Optional[int] = num_frames
snake_case__ : Tuple = image_size
snake_case__ : List[str] = min_resolution
snake_case__ : int = max_resolution
snake_case__ : str = do_resize
snake_case__ : Union[str, Any] = size
snake_case__ : str = do_normalize
snake_case__ : int = image_mean
snake_case__ : List[str] = image_std
snake_case__ : Any = crop_size
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = VivitImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
        self.image_processor_tester = VivitImageProcessingTester(self)
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_center_crop""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
snake_case__ : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for video in video_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
snake_case__ : Any = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : int = prepare_video_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for video in video_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : int = prepare_video_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for video in video_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
snake_case__ : Any = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case__ : Tuple = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
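# A small sketch of the behaviour asserted above: the processor turns one video
# (a list of frames) into a (batch, frames, channels, height, width) tensor.
# Frame count and resolutions here are arbitrary illustration values.
import numpy as np
from transformers import VivitImageProcessor

vivit_processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
video = [np.random.randint(0, 255, (30, 40, 3), dtype=np.uint8) for _ in range(10)]
pixel_values = vivit_processor(video, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 10, 3, 18, 18])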
| 38 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig ( BackboneConfigMixin , PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ = '''bit'''
lowerCamelCase__ = ['''preactivation''', '''bottleneck''']
lowerCamelCase__ = ['''SAME''', '''VALID''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="preactivation" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case__ : Tuple = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
snake_case__ : List[str] = num_channels
snake_case__ : Tuple = embedding_size
snake_case__ : str = hidden_sizes
snake_case__ : Optional[Any] = depths
snake_case__ : List[Any] = layer_type
snake_case__ : Dict = hidden_act
snake_case__ : Union[str, Any] = global_padding
snake_case__ : List[str] = num_groups
snake_case__ : str = drop_path_rate
snake_case__ : List[Any] = embedding_dynamic_padding
snake_case__ : List[str] = output_stride
snake_case__ : Dict = width_factor
snake_case__ : List[str] = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Dict = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
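# Quick illustration of the out_features/out_indices alignment performed at the
# end of __init__ above, using the default four-stage depths:
from transformers import BitConfig

bit_config = BitConfig(out_features=["stage4"])
print(bit_config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(bit_config.out_indices)  # expected: [4], aligned with 'stage4'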
| 38 | 1 |
'''simple docstring'''
def _print_dist(dist, v):
    """Print the all-pairs shortest-path matrix, using INF for unreachable pairs."""
    print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("""inf"""):
                print(int(dist[i][j]), end="""\t""")
            else:
                print("""INF""", end="""\t""")
        print()


def floyd_warshall(graph, v):
    """Compute all-pairs shortest paths for a dense v-by-v adjacency matrix."""
    dist = [[float("""inf""") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("""inf""")
                    and dist[k][j] != float("""inf""")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
    # # Expected output from the vertex, edge and src, dst, weight inputs
# 0 INF INF
# INF 0 2
# INF 1 0
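# A fixed, non-interactive sketch of the same algorithm: two edges (0 -> 1 with
# weight 2 and 1 -> 0 with weight 1) on a 3-vertex graph; vertex 2 stays
# unreachable, mirroring the INF entries in the example output above.
INF = float("inf")
example_graph = [
    [0.0, 2.0, INF],
    [1.0, 0.0, INF],
    [INF, INF, 0.0],
]
example_dist, _ = floyd_warshall(example_graph, 3)
assert example_dist[0][1] == 2.0 and example_dist[1][0] == 1.0 and example_dist[0][2] == INF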
| 38 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Map timm parameter names to their transformers equivalents."""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the timm classification head weights from the state dict."""
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]``."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO test image used to verify conversions."""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm ViT-hybrid checkpoint into the transformers design."""
    backbone_config = BitConfig(
        global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print("""Predicted class:""" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}" )
        model.push_to_hub(f"ybelkada/{vit_name}" )
        processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
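# Standalone numeric check of the q/k/v slicing performed by read_in_q_k_v
# above: timm stores one fused (3 * hidden, hidden) projection, and slicing it
# in thirds recovers query, key and value (hidden size 4 is arbitrary here).
import torch

hidden = 4
fused_qkv = torch.randn(3 * hidden, hidden)  # fused [q; k; v] rows
query_w = fused_qkv[:hidden, :]
key_w = fused_qkv[hidden : 2 * hidden, :]
value_w = fused_qkv[-hidden:, :]
# stacking the three slices back reproduces the fused matrix exactly
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), fused_qkv)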
| 38 | 1 |
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with ``accuracy`` terms of the Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_sin() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_sin() requires a positive int for accuracy""")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with ``accuracy`` terms of the Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_cos() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_cos() requires a positive int for accuracy""")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
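# Quick numerical sanity check of the approximations against the math module;
# after the range reduction above, 30 series terms match to machine precision.
import math

assert abs(maclaurin_sin(10) - math.sin(10)) < 1e-9
assert abs(maclaurin_cos(-5) - math.cos(-5)) < 1e-9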
| 38 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class DecoderOutput ( BaseOutput ):
    """Output of a decoding step, carrying the decoded sample tensor."""

    sample: torch.FloatTensor
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE=True , ):
super().__init__()
snake_case__ : str = layers_per_block
snake_case__ : int = torch.nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : List[Any] = None
snake_case__ : List[Any] = nn.ModuleList([] )
# down
snake_case__ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = output_channel
snake_case__ : Union[str, Any] = block_out_channels[i]
snake_case__ : int = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : str = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
snake_case__ : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# out
snake_case__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : str = 2 * out_channels if double_z else out_channels
snake_case__ : int = nn.Convad(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : Union[str, Any] = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = x
snake_case__ : int = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
snake_case__ : List[str] = down_block(__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
snake_case__ : Any = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE="group" , ):
super().__init__()
snake_case__ : Any = layers_per_block
snake_case__ : Optional[Any] = nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : Optional[int] = in_channels if norm_type == """spatial""" else None
# mid
snake_case__ : Tuple = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# up
snake_case__ : List[Any] = list(reversed(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = output_channel
snake_case__ : Optional[Any] = reversed_block_out_channels[i]
snake_case__ : List[str] = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : int = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : int = output_channel
# out
if norm_type == "spatial":
snake_case__ : List[Any] = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
else:
snake_case__ : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : Union[str, Any] = nn.Convad(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : int = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Union[str, Any] = z
snake_case__ : Any = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
snake_case__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
snake_case__ : int = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : List[Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : Dict = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
snake_case__ : Optional[Any] = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="random" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ):
super().__init__()
snake_case__ : int = n_e
snake_case__ : Optional[int] = vq_embed_dim
snake_case__ : int = beta
snake_case__ : Optional[int] = legacy
snake_case__ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case__ : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
snake_case__ : Optional[Any] = self.used.shape[0]
snake_case__ : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case__ : Dict = self.re_embed
snake_case__ : List[str] = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
snake_case__ : Union[str, Any] = n_e
snake_case__ : str = sane_index_shape
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : Dict = inds.reshape(ishape[0] , -1 )
snake_case__ : Any = self.used.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
snake_case__ : List[Any] = match.argmax(-1 )
snake_case__ : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case__ : Optional[Any] = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : int = inds.reshape(ishape[0] , -1 )
snake_case__ : Optional[int] = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
snake_case__ : List[Any] = 0 # simply set to zero
snake_case__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
# reshape z -> (batch, height, width, channel) and flatten
snake_case__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case__ : Dict = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
snake_case__ : Union[str, Any] = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
snake_case__ : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
snake_case__ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case__ : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case__ : str = self.remap_to_used(__SCREAMING_SNAKE_CASE )
snake_case__ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case__ : Optional[int] = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case__ : int = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
snake_case__ : str = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
snake_case__ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class DiagonalGaussianDistribution ( object ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
snake_case__ : Tuple = parameters
snake_case__ , snake_case__ : Any = torch.chunk(__SCREAMING_SNAKE_CASE , 2 , dim=1 )
snake_case__ : Union[str, Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
snake_case__ : Optional[int] = deterministic
snake_case__ : Optional[int] = torch.exp(0.5 * self.logvar )
snake_case__ : Any = torch.exp(self.logvar )
if self.deterministic:
snake_case__ : List[str] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None ):
# make sure sample is on the same device as the parameters and has same dtype
snake_case__ : Dict = randn_tensor(
self.mean.shape , generator=__SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
snake_case__ : Optional[int] = self.mean + self.std * sample
return x
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
snake_case__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.mean
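# Standalone check of the distance identity used in the quantizer above:
# ||z - e||^2 = ||z||^2 + ||e||^2 - 2 z.e gives the same squared distances as
# torch.cdist, so the argmin selects the same nearest codebook entry.
import torch

z = torch.randn(5, 8)          # 5 flattened latents with embedding dim 8
codebook = torch.randn(16, 8)  # 16 codebook vectors

expanded = z.pow(2).sum(1, keepdim=True) + codebook.pow(2).sum(1) - 2 * z @ codebook.t()
squared_cdist = torch.cdist(z, codebook).pow(2)
assert torch.allclose(expanded, squared_cdist, atol=1e-4)
print(squared_cdist.argmin(dim=1))  # nearest codebook index per latent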
| 38 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor ( ProcessorMixin ):
'''simple docstring'''
lowerCamelCase__ = ['''image_processor''', '''tokenizer''']
lowerCamelCase__ = '''BlipImageProcessor'''
lowerCamelCase__ = '''AutoTokenizer'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = False
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.image_processor
def __call__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
snake_case__ : Optional[int] = self.tokenizer
snake_case__ : Union[str, Any] = self.tokenizer(
text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_length=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
return text_encoding
# add pixel_values
snake_case__ : Any = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
if text is not None:
snake_case__ : Any = self.tokenizer(
text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_length=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
else:
snake_case__ : Any = None
if text_encoding is not None:
encoding_image_processor.update(__SCREAMING_SNAKE_CASE )
return encoding_image_processor
def __UpperCamelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __UpperCamelCase ( self ):
snake_case__ : List[str] = self.tokenizer.model_input_names
snake_case__ : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
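# The class above follows the standard ProcessorMixin composition pattern:
# delegate images to the image processor, text to the tokenizer, and merge the
# two encodings. A dependency-free toy sketch of that pattern (all names here
# are illustrative, not taken from the file above):
class ToyProcessor:
    def __init__(self, image_processor, tokenizer):
        self.image_processor = image_processor
        self.tokenizer = tokenizer

    def __call__(self, images=None, text=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        if images is None:
            return self.tokenizer(text=text, **kwargs)
        encoding = dict(self.image_processor(images, **kwargs))
        if text is not None:
            encoding.update(self.tokenizer(text=text, **kwargs))
        return encoding


toy = ToyProcessor(
    image_processor=lambda images, **kw: {"pixel_values": [images]},
    tokenizer=lambda text=None, **kw: {"input_ids": [[101, 2023, 102]]},
)
print(toy(images="img.png", text="describe the picture"))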
| 38 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1, 3_8_4, 2_4, 2_4] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : str = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : Optional[int] = patch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : str = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : str = backbone_out_indices
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : Optional[int] = num_labels
snake_case__ : str = backbone_featmap_shape
snake_case__ : List[Any] = scope
snake_case__ : Optional[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
snake_case__ : List[Any] = (image_size // patch_size) ** 2
snake_case__ : Union[str, Any] = num_patches + 1
def __UpperCamelCase ( self ):
snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : str = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
snake_case__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [9_6, 1_9_2, 3_8_4, 7_6_8],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = DPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : Dict = DPTForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = DPTModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
            if model_class in get_values(MODEL_MAPPING ):
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = False
snake_case__ : str = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
snake_case__ : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case__ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case__ : List[str] = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = """add"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
snake_case__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = outputs.predicted_depth
# verify the predicted depth
snake_case__ : Any = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
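# Hedged sketch: the same checkpoint can also be driven through the high-level
# pipeline API (the task name and checkpoint are real; `prepare_img` is assumed
# to keep the name used at its call site above):
def _demo_depth_estimation_pipeline():
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-hybrid-midas")
    # The pipeline returns a dict with a "predicted_depth" tensor and a PIL "depth" image
    return depth_estimator(prepare_img())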
| 38 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Dict = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None , __magic_name__ : List[str]=None ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = True
while ask_again:
snake_case__ : Optional[Any] = input(__magic_name__ )
try:
if default is not None and len(__magic_name__ ) == 0:
return default
return convert_value(__magic_name__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Any=[] , __magic_name__ : Optional[int]=None , __magic_name__ : int=0 ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = BulletMenu(__magic_name__ , __magic_name__ )
snake_case__ : Optional[Any] = menu.run(default_choice=__magic_name__ )
return convert_value(__magic_name__ ) if convert_value is not None else result
def UpperCamelCase__ ( __magic_name__ : Any ) -> int:
'''simple docstring'''
snake_case__ : Tuple = int(__magic_name__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase__ ( __magic_name__ : str ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = int(__magic_name__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = int(__magic_name__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Tuple:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __snake_case ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = super()._format_usage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = usage.replace("""<command> [<args>] """ , """""" )
return usage
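# Self-contained sketch (class and prog names are illustrative) of how a
# usage-rewriting formatter like the one above is wired into argparse:
class _DemoSubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        # Hide the generic "<command> [<args>]" placeholder from usage lines
        return usage.replace("<command> [<args>] ", "")

def _demo_build_parser():
    return argparse.ArgumentParser(prog="accelerate", formatter_class=_DemoSubcommandHelpFormatter)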
| 38 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3 # noqa: F401
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Dict:
'''simple docstring'''
    snake_case__ : int = boto3.client("""iam""" )
snake_case__ : Union[str, Any] = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__magic_name__ , AssumeRolePolicyDocument=json.dumps(__magic_name__ , indent=2 ) )
snake_case__ : Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__magic_name__ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(__magic_name__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def UpperCamelCase__ ( __magic_name__ : Any ) -> Tuple:
'''simple docstring'''
    snake_case__ : List[str] = boto3.client("""iam""" )
return iam_client.get_role(RoleName=__magic_name__ )["Role"]["Arn"]
def UpperCamelCase__ ( ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , __magic_name__ , )
snake_case__ : List[Any] = None
if credentials_configuration == 0:
snake_case__ : Dict = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
snake_case__ : List[str] = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
snake_case__ : List[str] = _ask_field("""AWS Access Key ID: """ )
snake_case__ : int = aws_access_key_id
snake_case__ : Optional[Any] = _ask_field("""AWS Secret Access Key: """ )
snake_case__ : List[str] = aws_secret_access_key
snake_case__ : Tuple = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
snake_case__ : Optional[int] = aws_region
snake_case__ : int = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , __magic_name__ , )
if role_management == 0:
snake_case__ : Optional[Any] = _ask_field("""Enter your IAM role name: """ )
else:
snake_case__ : Optional[int] = """accelerate_sagemaker_execution_role"""
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(__magic_name__ )
snake_case__ : Dict = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Any = None
if is_custom_docker_image:
snake_case__ : str = _ask_field("""Enter your Docker image: """ , lambda __magic_name__ : str(__magic_name__ ).lower() )
snake_case__ : Tuple = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : List[Any] = None
if is_sagemaker_inputs_enabled:
snake_case__ : str = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Optional[int] = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Optional[Any] = None
if is_sagemaker_metrics_enabled:
snake_case__ : List[Any] = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Tuple = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
snake_case__ : Any = {}
snake_case__ : List[Any] = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
snake_case__ : str = """dynamo_"""
snake_case__ : Tuple = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
snake_case__ : List[str] = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
snake_case__ : str = _ask_options(
"""Which mode do you want to use?""" , __magic_name__ , lambda __magic_name__ : TORCH_DYNAMO_MODES[int(__magic_name__ )] , default="""default""" , )
snake_case__ : Union[str, Any] = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : str = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Dict = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
snake_case__ : List[str] = _ask_options(
__magic_name__ , __magic_name__ , lambda __magic_name__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__magic_name__ )] )
else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
snake_case__ : Optional[int] = _ask_field(__magic_name__ , lambda __magic_name__ : str(__magic_name__ ).lower() , default="""ml.p3.2xlarge""" )
snake_case__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
snake_case__ : Optional[Any] = _ask_field(
"""How many machines do you want use? [1]: """ , __magic_name__ , default=1 , )
snake_case__ : Union[str, Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
        image_uri=__magic_name__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__magic_name__ , use_cpu=__magic_name__ , dynamo_config=__magic_name__ , ec2_instance_type=__magic_name__ , profile=__magic_name__ , region=__magic_name__ , iam_role_name=__magic_name__ , mixed_precision=__magic_name__ , num_machines=__magic_name__ , sagemaker_inputs_file=__magic_name__ , sagemaker_metrics_file=__magic_name__ , )
| 38 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : list[int] , __magic_name__ : list[int] , __magic_name__ : int ) -> bool:
'''simple docstring'''
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(__magic_name__ ) )
def UpperCamelCase__ ( __magic_name__ : list[list[int]] , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : int ) -> bool:
'''simple docstring'''
if index == len(__magic_name__ ):
return True
# Recursive Step
for i in range(__magic_name__ ):
if valid_coloring(graph[index] , __magic_name__ , __magic_name__ ):
# Color current vertex
snake_case__ : Dict = i
# Validate coloring
if util_color(__magic_name__ , __magic_name__ , __magic_name__ , index + 1 ):
return True
# Backtrack
snake_case__ : Tuple = -1
return False
def UpperCamelCase__ ( __magic_name__ : list[list[int]] , __magic_name__ : int ) -> list[int]:
'''simple docstring'''
snake_case__ : Dict = [-1] * len(__magic_name__ )
if util_color(__magic_name__ , __magic_name__ , __magic_name__ , 0 ):
return colored_vertices
return []
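# Hedged usage sketch, assuming the three functions above keep the names their
# internal calls use (valid_coloring, util_color, color); the graph is
# illustrative:
if __name__ == "__main__":
    # Triangle 0-1-2 plus vertex 3 attached only to vertex 0
    sample_graph = [
        [0, 1, 1, 1],
        [1, 0, 1, 0],
        [1, 1, 0, 0],
        [1, 0, 0, 0],
    ]
    # A triangle needs at least 3 colors, so this succeeds, e.g. [0, 1, 2, 1]
    print(color(sample_graph, 3))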
| 38 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase__ ( __magic_name__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
snake_case__ : Union[str, Any] = f"https://www.amazon.in/laptop/s?k={product}"
snake_case__ : List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
snake_case__ : int = BeautifulSoup(requests.get(__magic_name__ , headers=__magic_name__ ).text )
# Initialize a Pandas dataframe with the column titles
snake_case__ : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
            snake_case__ : Optional[int] = item.h2.text
            snake_case__ : Any = """https://www.amazon.in/""" + item.h2.a["""href"""]
snake_case__ : List[str] = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
snake_case__ : Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
snake_case__ : Optional[int] = """Not available"""
try:
snake_case__ : Tuple = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
snake_case__ : Optional[Any] = """"""
try:
snake_case__ : str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_00 )
except ValueError:
snake_case__ : List[Any] = float("""nan""" )
except AttributeError:
pass
snake_case__ : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case__ : List[Any] = """ """
snake_case__ : Union[str, Any] = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
A_ : int = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 38 | 1 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_0_2_4 , __SCREAMING_SNAKE_CASE=1_0_2_4 , __SCREAMING_SNAKE_CASE=3.6 ):
snake_case__ : Union[str, Any] = tokenizer
snake_case__ : Optional[int] = tokenizer.bos_token_id
snake_case__ : List[str] = dataset
snake_case__ : Dict = seq_length
snake_case__ : Tuple = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
snake_case__ : List[Any] = iter(self.dataset )
snake_case__ : Tuple = True
while more_examples:
snake_case__ , snake_case__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__SCREAMING_SNAKE_CASE )["""content"""] )
buffer_len += len(buffer[-1] )
except StopIteration:
snake_case__ : Dict = False
break
            snake_case__ : Any = self.tokenizer(buffer , truncation=False )["""input_ids"""]
snake_case__ : List[Any] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , self.seq_length ):
snake_case__ : List[str] = all_token_ids[i : i + self.seq_length]
if len(__SCREAMING_SNAKE_CASE ) == self.seq_length:
yield torch.tensor(__SCREAMING_SNAKE_CASE )
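# Hedged usage sketch of the class above, assuming it keeps the name
# `ConstantLengthDataset` used by create_dataloader below; the checkpoint and
# toy corpus are illustrative:
def _demo_constant_length_dataset():
    from transformers import AutoTokenizer

    demo_tokenizer = AutoTokenizer.from_pretrained("gpt2")
    toy_stream = [{"content": "def add(a, b):\n    return a + b"}] * 8
    demo_dataset = ConstantLengthDataset(demo_tokenizer, toy_stream, seq_length=32)
    return next(iter(demo_dataset))  # torch.LongTensor of shape (32,)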
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> int:
'''simple docstring'''
snake_case__ : int = {"""streaming""": True}
snake_case__ : List[Any] = load_dataset(args.dataset_name , split="""train""" , **__magic_name__ )
snake_case__ : Dict = ConstantLengthDataset(__magic_name__ , __magic_name__ , seq_length=args.seq_length )
snake_case__ : Optional[int] = DataLoader(__magic_name__ , batch_size=args.batch_size )
return eval_dataloader
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Optional[Any]:
'''simple docstring'''
model.eval()
snake_case__ : int = []
for step, batch in enumerate(__magic_name__ ):
with torch.no_grad():
snake_case__ : Optional[Any] = model(__magic_name__ , labels=__magic_name__ )
snake_case__ : Union[str, Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__magic_name__ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
snake_case__ : Optional[Any] = torch.mean(torch.cat(__magic_name__ ) )
try:
snake_case__ : List[Any] = torch.exp(__magic_name__ )
except OverflowError:
snake_case__ : List[str] = float("""inf""" )
return loss.item(), perplexity.item()
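# Worked example of the perplexity computation above (numbers illustrative):
# a mean cross-entropy loss of 2.0 corresponds to perplexity exp(2.0) ≈ 7.39,
# and an overflowing exp() is reported as float("inf").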
# Setup Accelerator
A_ : List[str] = Accelerator()
# Parse configuration
A_ : List[str] = HfArgumentParser(EvaluationArguments)
A_ : Optional[int] = parser.parse_args()
set_seed(args.seed)
# Logging
A_ : Union[str, Any] = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
A_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
A_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
A_ : Dict = create_dataloader(args)
# Prepare everything with our `accelerator`.
A_ , A_ : Union[str, Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
A_ , A_ : Optional[Any] = evaluate(args)
logger.info(F'loss/eval: {eval_loss}, perplexity: {perplexity}')
| 38 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LongformerTokenizer
lowerCamelCase__ = True
lowerCamelCase__ = LongformerTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Optional[int] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = """lower newer"""
snake_case__ : Dict = """lower newer"""
return input_text, output_text
def __UpperCamelCase ( self ):
snake_case__ : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = """lower newer"""
snake_case__ : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokens + [tokenizer.unk_token]
snake_case__ : List[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
snake_case__ : int = tokenizer.encode("""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = """Encode this sequence."""
snake_case__ : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
snake_case__ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case__ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : str = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : str = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __UpperCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""trim_offsets"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : Any = f"{text_of_1_token} {text_of_1_token}"
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
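# Hedged sketch of the offset behaviour the cases above exercise (the
# checkpoint is the real Longformer one; the sentence is illustrative):
def _demo_offset_mapping():
    from transformers import LongformerTokenizerFast

    demo_tokenizer = LongformerTokenizerFast.from_pretrained(
        "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=True
    )
    encoding = demo_tokenizer("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    return encoding["offset_mapping"]  # [(0, 5), (6, 11)] with these settings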
| 38 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
A_ : Optional[int] = None
A_ : List[Any] = logging.get_logger(__name__)
A_ : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
A_ : Any = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
A_ : Tuple = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
A_ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = ['''input_ids''', '''attention_mask''']
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = []
lowerCamelCase__ = []
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : List[Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
snake_case__ : Tuple = vocab_file
snake_case__ : List[str] = False if not self.vocab_file else True
snake_case__ : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
snake_case__ : List[Any] = {
lang_code: self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case__ : Any = src_lang if src_lang is not None else """en_XX"""
snake_case__ : Optional[Any] = self.convert_tokens_to_ids(self._src_lang )
snake_case__ : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
snake_case__ : Tuple = [self.sep_token_id]
snake_case__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case__ : List[Any] = src_lang
snake_case__ : str = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tgt_lang_id
return inputs
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
snake_case__ : Union[str, Any] = src_lang
snake_case__ : int = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : int = []
snake_case__ : Dict = [self.eos_token_id, self.cur_lang_code]
snake_case__ : int = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case__ : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case__ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = []
snake_case__ : List[Any] = [self.eos_token_id, self.cur_lang_code]
snake_case__ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case__ : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case__ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory." )
return
snake_case__ : List[str] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
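# Hedged usage sketch via the public transformers name MBartTokenizerFast (the
# checkpoint is real; the sentence is illustrative):
def _demo_mbart_translation_batch():
    from transformers import MBartTokenizerFast

    demo_tokenizer = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    # Per set_src_lang_special_tokens above, source ids end with </s> en_XX
    return demo_tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")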
| 38 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''resnet'''
lowerCamelCase__ = ['''basic''', '''bottleneck''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="bottleneck" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
snake_case__ : List[Any] = num_channels
snake_case__ : str = embedding_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : Dict = depths
snake_case__ : List[Any] = layer_type
snake_case__ : int = hidden_act
snake_case__ : Union[str, Any] = downsample_in_first_stage
snake_case__ : Dict = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-3
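# Hedged sketch: this config mirrors transformers' public ResNetConfig, so an
# equivalent ResNet-18-style variant can be built like this (the values are
# illustrative choices for the "basic" layer type):
def _demo_resnet18_style_config():
    from transformers import ResNetConfig

    return ResNetConfig(depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic")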
| 38 | 1 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
A_ : Any = logging.get_logger(__name__)
A_ : Dict = "Hello world! cécé herlolip"
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : str , __magic_name__ : bool ) -> int:
'''simple docstring'''
snake_case__ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
roberta.eval() # disable dropout
snake_case__ : str = roberta.model.encoder.sentence_encoder
snake_case__ : Optional[int] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
snake_case__ : Optional[int] = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , __magic_name__ )
snake_case__ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case__ : Tuple = roberta_sent_encoder.embed_tokens.weight
snake_case__ : List[Any] = roberta_sent_encoder.embed_positions.weight
snake_case__ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case__ : int = roberta_sent_encoder.layer_norm.weight
snake_case__ : Union[str, Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case__ : BertLayer = model.roberta.encoder.layer[i]
snake_case__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
snake_case__ : RobertaAttention = layer.attention
snake_case__ : List[Any] = roberta_layer.self_attn_layer_norm.weight
snake_case__ : int = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case__ : Dict = roberta_layer.self_attn.q_proj.weight
snake_case__ : List[Any] = roberta_layer.self_attn.q_proj.bias
snake_case__ : List[Any] = roberta_layer.self_attn.k_proj.weight
snake_case__ : str = roberta_layer.self_attn.k_proj.bias
snake_case__ : str = roberta_layer.self_attn.v_proj.weight
snake_case__ : Tuple = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case__ : List[Any] = roberta_layer.self_attn.out_proj.weight
snake_case__ : Optional[Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case__ : Optional[int] = roberta_layer.final_layer_norm.weight
snake_case__ : List[str] = roberta_layer.final_layer_norm.bias
# intermediate
snake_case__ : BertIntermediate = layer.intermediate
        # the intermediate projection is fed by fc1 of the fairseq layer
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        snake_case__ : Union[str, Any] = roberta_layer.fc1.weight
        snake_case__ : List[str] = roberta_layer.fc1.bias
        # output projection is fed by fc2 of the fairseq layer
        snake_case__ : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        snake_case__ : str = roberta_layer.fc2.weight
        snake_case__ : int = roberta_layer.fc2.bias
# end of layer
if classification_head:
snake_case__ : List[str] = roberta.model.classification_heads["""mnli"""].dense.weight
snake_case__ : str = roberta.model.classification_heads["""mnli"""].dense.bias
snake_case__ : Optional[int] = roberta.model.classification_heads["""mnli"""].out_proj.weight
snake_case__ : Tuple = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
snake_case__ : Union[str, Any] = roberta.model.encoder.lm_head.dense.weight
snake_case__ : int = roberta.model.encoder.lm_head.dense.bias
snake_case__ : int = roberta.model.encoder.lm_head.layer_norm.weight
snake_case__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias
snake_case__ : Dict = roberta.model.encoder.lm_head.weight
snake_case__ : Any = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case__ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 ) # batch of size 1
snake_case__ : int = model(__magic_name__ )[0]
if classification_head:
snake_case__ : str = roberta.model.classification_heads["""mnli"""](roberta.extract_features(__magic_name__ ) )
else:
snake_case__ : List[str] = roberta.model(__magic_name__ )[0]
print(our_output.shape , their_output.shape )
snake_case__ : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
snake_case__ : Tuple = torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ , exist_ok=__magic_name__ )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
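# Hedged sketch (illustrative, not part of the original script): the tolerance
# check performed inline above, factored into a reusable helper. The name
# _tensors_match and the 1e-3 default are assumptions of this example; 1e-3
# mirrors the atol used in the conversion check.
def _tensors_match(ours, theirs, atol=1e-3):
    """Return True when two output tensors agree within `atol`."""
    return bool(torch.allclose(ours, theirs, atol=atol))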
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
A_ : Optional[Any] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 38 |
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
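# Hedged migration note (illustrative): new code should import from the path
# named in the deprecation message rather than from this shim, e.g.:
#   from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput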
| 38 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
raise NotImplementedError()
@abstractmethod
def __UpperCamelCase ( self ):
raise NotImplementedError()
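# Hedged sketch (illustrative): how a concrete CLI command typically fills in
# the two abstract hooks above. The names EchoCommand, register_subcommand and
# run follow the upstream convention and are assumed here, since the obfuscated
# method names in this file collide.
class EchoCommand:
    @staticmethod
    def register_subcommand(parser):
        # `parser` is the sub-parsers object of the root ArgumentParser
        echo_parser = parser.add_parser("echo")
        echo_parser.set_defaults(func=lambda args: EchoCommand().run())
    def run(self):
        print("echo")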
| 38 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ):
snake_case__ : str = []
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_init_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_evaluate""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_predict""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_save""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_log""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_prediction_step""" )
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Tuple = tempfile.mkdtemp()
def __UpperCamelCase ( self ):
shutil.rmtree(self.output_dir )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
snake_case__ : List[Any] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionModelConfig(a=__SCREAMING_SNAKE_CASE , b=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionPreTrainedModel(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = TrainingArguments(self.output_dir , disable_tqdm=__SCREAMING_SNAKE_CASE , report_to=[] , **__SCREAMING_SNAKE_CASE )
return Trainer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , train_dataset=__SCREAMING_SNAKE_CASE , eval_dataset=__SCREAMING_SNAKE_CASE , callbacks=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
# Order doesn't matter
snake_case__ : Tuple = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
snake_case__ : List[str] = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
for cba, cba in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , cba.__class__ )
elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(cba.__class__ , __SCREAMING_SNAKE_CASE )
else:
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = ["""on_init_end""", """on_train_begin"""]
snake_case__ : Union[str, Any] = 0
snake_case__ : Dict = len(trainer.get_eval_dataloader() )
snake_case__ : Any = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(__SCREAMING_SNAKE_CASE ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_trainer()
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# Callbacks passed at init are added to the default callbacks
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
snake_case__ : Optional[Any] = self.get_trainer(disable_tqdm=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ : int = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.get_trainer()
snake_case__ : List[str] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(cb.__class__ , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# We can also add, pop, or remove by instance
snake_case__ : List[Any] = self.get_trainer()
snake_case__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.get_trainer()
snake_case__ : Any = trainer.callback_handler.callbacks[0]
snake_case__ : Optional[Any] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
import warnings
        # XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
snake_case__ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# A bit of everything
snake_case__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
snake_case__ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case__ : List[str] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
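# Hedged sketch (illustrative): unlike the recording test double above, a real
# user callback usually overrides a single hook and inherits TrainerCallback's
# no-op defaults for the rest; the class name is invented for this example.
class PrintEpochCallback(TrainerCallback):
    def on_epoch_end(self, args, state, control, **kwargs):
        print(f"finished epoch {state.epoch}")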
| 38 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
A_ : Optional[int] = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
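# Hedged usage note (illustrative): once the lazy module is installed in
# sys.modules, `TapexTokenizer` is resolved on first attribute access rather
# than at import time, keeping the top-level package import cheap.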
| 38 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
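# Hedged usage sketch (illustrative): outside the test harness the processor is
# driven the same way; the checkpoint name below is an assumption of this
# example and is not taken from this file.
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values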
| 38 | 1 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
A_ : Dict = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = eval_examples
snake_case__ : List[str] = post_process_function
snake_case__ : Optional[Any] = quant_trainer_args
snake_case__ : List[str] = 1_2_8 # default number of calibration samples
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None ):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
snake_case__ : List[str] = calib_dataset if calib_dataset is not None else self.calib_dataset
snake_case__ : Dict = self._remove_unused_columns(__SCREAMING_SNAKE_CASE , description="""Calibration""" )
return DataLoader(
__SCREAMING_SNAKE_CASE , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : List[Any] = self.train_dataset if calib_dataset is None else calib_dataset
snake_case__ : Dict = self.get_calib_dataloader(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.model
quant_trainer.configure_model(__SCREAMING_SNAKE_CASE , self.quant_trainer_args , calib=__SCREAMING_SNAKE_CASE )
model.eval()
quant_trainer.enable_calibration(__SCREAMING_SNAKE_CASE )
logger.info("""***** Running calibration *****""" )
logger.info(f" Num examples = {self.calib_num}" )
logger.info(f" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(__SCREAMING_SNAKE_CASE ):
# Prediction step
snake_case__ , snake_case__ , snake_case__ : Any = self.prediction_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prediction_loss_only=__SCREAMING_SNAKE_CASE )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__SCREAMING_SNAKE_CASE , self.quant_trainer_args )
snake_case__ : Optional[Any] = model
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "eval" ):
snake_case__ : Dict = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case__ : Dict = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we will do it in the loop here.
snake_case__ : Tuple = self.compute_metrics
snake_case__ : Tuple = None
snake_case__ : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case__ : Optional[Any] = eval_loop(
__SCREAMING_SNAKE_CASE , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , )
finally:
snake_case__ : List[str] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
snake_case__ : Any = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions )
snake_case__ : str = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
snake_case__ : str = metrics.pop(__SCREAMING_SNAKE_CASE )
self.log(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Union[str, Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case__ : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE )
return metrics
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" ):
snake_case__ : int = self.get_test_dataloader(__SCREAMING_SNAKE_CASE )
        # Temporarily disable metric computation; we will do it in the loop here.
snake_case__ : Dict = self.compute_metrics
snake_case__ : List[str] = None
snake_case__ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case__ : str = eval_loop(
__SCREAMING_SNAKE_CASE , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , )
finally:
snake_case__ : Union[str, Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case__ : List[str] = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions , """predict""" )
snake_case__ : Any = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
snake_case__ : Optional[int] = metrics.pop(__SCREAMING_SNAKE_CASE )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE="./" ):
snake_case__ : List[Any] = self.eval_dataset
snake_case__ : Union[str, Any] = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = next(iter(__SCREAMING_SNAKE_CASE ) )
        # choose a single device so that the model and the inputs stay consistent
snake_case__ : Union[str, Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
snake_case__ : Tuple = tuple(v.to(__SCREAMING_SNAKE_CASE ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
snake_case__ : Optional[Any] = True
snake_case__ : Optional[Any] = self.model.to(__SCREAMING_SNAKE_CASE )
model.eval()
model.float()
snake_case__ : List[Any] = model.module if hasattr(__SCREAMING_SNAKE_CASE , """module""" ) else model
quant_trainer.configure_model(__SCREAMING_SNAKE_CASE , self.quant_trainer_args )
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , """model.onnx""" )
logger.info(f"exporting model to {output_model_file}" )
snake_case__ : List[str] = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , export_params=__SCREAMING_SNAKE_CASE , opset_version=1_3 , do_constant_folding=__SCREAMING_SNAKE_CASE , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=__SCREAMING_SNAKE_CASE , )
logger.info("""onnx export finished""" )
| 38 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : List[Any] = patch_sizes
snake_case__ : Optional[int] = patch_stride
snake_case__ : Optional[Any] = patch_padding
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : Dict = num_labels
snake_case__ : Optional[Any] = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : Optional[int] = num_heads
snake_case__ : Optional[int] = stride_kv
snake_case__ : int = depth
snake_case__ : Optional[Any] = cls_token
snake_case__ : List[Any] = attention_drop_rate
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFCvtModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = (self.image_size, self.image_size)
snake_case__ , snake_case__ : str = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : str = TFCvtForImageClassification(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtModelTester(self )
snake_case__ : Any = TFCvtConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__SCREAMING_SNAKE_CASE )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] = outputs.hidden_states
snake_case__ : Tuple = len(self.model_tester.depth )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFCvtModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : int = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
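# Hedged sketch (illustrative): the spatial-size recurrence applied per stage
# in the model check above, written out as a standalone helper; the function
# name is invented for this example. Integer floor division matches the
# floor(...) used above for positive sizes.
def _conv_output_size(size, padding, kernel, stride):
    """floor((size + 2*padding - kernel) / stride) + 1, the usual conv formula."""
    return (size + 2 * padding - kernel) // stride + 1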
| 38 | 1 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = CodeGenTokenizer
lowerCamelCase__ = CodeGenTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = {'''add_prefix_space''': True}
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Tuple = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
snake_case__ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : str = {"""unk_token""": """<unk>"""}
snake_case__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = """lower newer"""
snake_case__ : Optional[Any] = """lower newer"""
return input_text, output_text
def __UpperCamelCase ( self ):
snake_case__ : Tuple = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : str = """lower newer"""
snake_case__ : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ : Union[str, Any] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokens + [tokenizer.unk_token]
snake_case__ : Any = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
snake_case__ : int = self.get_tokenizer()
snake_case__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = """lower newer"""
# Testing tokenization
snake_case__ : Optional[Any] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
snake_case__ : Any = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
snake_case__ : str = self.get_rust_tokenizer(add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : int = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing the unknown token
snake_case__ : Dict = tokens + [rust_tokenizer.unk_token]
snake_case__ : int = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        # It's very difficult to mix/test pretokenization with byte-level encodings
        # and get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# Simple input
snake_case__ : str = """This is a simple input"""
snake_case__ : Any = ["""This is a simple input 1""", """This is a simple input 2"""]
snake_case__ : Optional[int] = ("""This is a simple input""", """This is a pair""")
snake_case__ : Union[str, Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" )
# Simple input
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" )
# Simple input
self.assertRaises(
__SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" , )
# Pair input
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" )
# Pair input
self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" )
# Pair input
self.assertRaises(
__SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" , )
def __UpperCamelCase ( self ):
snake_case__ : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
snake_case__ : int = """This is a simple input"""
snake_case__ : List[Any] = ["""This is a simple input looooooooong""", """This is a simple input"""]
snake_case__ : List[str] = ("""This is a simple input""", """This is a pair""")
snake_case__ : List[str] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
snake_case__ : Optional[Any] = tokenizer.pad_token_id
snake_case__ : int = tokenizer(__SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=3_0 , return_tensors="""np""" )
snake_case__ : str = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncate=__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
snake_case__ : Tuple = tokenizer(*__SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=6_0 , return_tensors="""np""" )
snake_case__ : Optional[Any] = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncate=__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __UpperCamelCase ( self ):
snake_case__ : str = """$$$"""
snake_case__ : Dict = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__SCREAMING_SNAKE_CASE , add_bos_token=__SCREAMING_SNAKE_CASE )
snake_case__ : int = """This is a simple input"""
snake_case__ : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
snake_case__ : List[str] = tokenizer.bos_token_id
snake_case__ : List[str] = tokenizer(__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer(__SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , __SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
snake_case__ : str = tokenizer.decode(out_s.input_ids )
snake_case__ : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
snake_case__ : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
snake_case__ : List[str] = """\nif len_a > len_b: result = a\nelse: result = b"""
snake_case__ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
snake_case__ : Any = tokenizer.decode(__SCREAMING_SNAKE_CASE , truncate_before_pattern=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
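# Hedged usage sketch (illustrative): the truncation mechanism exercised in the
# slow test above, shown standalone; `tokenizer` and `ids` are placeholders.
# Each pattern is a regular expression, and the decoded text appears to be cut
# before the earliest match, as the expected string in the test suggests.
#   text = tokenizer.decode(ids, truncate_before_pattern=[r"\n\n\n"])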
| 38 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
        # For consistency across the different places DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers; it is also initialized only from integers.
snake_case__ : int = [[1, 2, 4], [1, 2, 3, 4]]
snake_case__ : Any = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self ):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
snake_case__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [[1, 2, 3], [1, 2, 4]]
snake_case__ : Optional[int] = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : Any = dc.update(1 )
snake_case__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : Tuple = dc.update(2 )
snake_case__ : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(3 )
snake_case__ : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
snake_case__ : int = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
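# Hedged usage sketch (illustrative): in generation the constraint is passed
# through `model.generate(..., constraints=[...])` with beam search; `model`
# and `input_ids` are placeholders in this example.
#   constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#   output = model.generate(input_ids, constraints=[constraint], num_beams=4)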
| 38 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ : Dict = logging.get_logger(__name__)
A_ : Any = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''detr'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_0_0 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_0_4_8 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_0_4_8 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="sine" , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , **__SCREAMING_SNAKE_CASE , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case__ : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = backbone_config.get("""model_type""" )
snake_case__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
snake_case__ : Dict = config_class.from_dict(__SCREAMING_SNAKE_CASE )
# set timm attributes to None
snake_case__ , snake_case__ , snake_case__ : int = None, None, None
snake_case__ : List[Any] = use_timm_backbone
snake_case__ : Optional[int] = backbone_config
snake_case__ : Tuple = num_channels
snake_case__ : str = num_queries
snake_case__ : Optional[int] = d_model
snake_case__ : Tuple = encoder_ffn_dim
snake_case__ : Union[str, Any] = encoder_layers
snake_case__ : List[str] = encoder_attention_heads
snake_case__ : Dict = decoder_ffn_dim
snake_case__ : Any = decoder_layers
snake_case__ : Optional[int] = decoder_attention_heads
snake_case__ : List[Any] = dropout
snake_case__ : List[str] = attention_dropout
snake_case__ : Tuple = activation_dropout
snake_case__ : List[str] = activation_function
snake_case__ : List[str] = init_std
snake_case__ : Optional[Any] = init_xavier_std
snake_case__ : int = encoder_layerdrop
snake_case__ : Any = decoder_layerdrop
snake_case__ : int = encoder_layers
snake_case__ : Dict = auxiliary_loss
snake_case__ : Dict = position_embedding_type
snake_case__ : Tuple = backbone
snake_case__ : str = use_pretrained_backbone
snake_case__ : Union[str, Any] = dilation
# Hungarian matcher
snake_case__ : int = class_cost
snake_case__ : Optional[int] = bbox_cost
snake_case__ : Dict = giou_cost
# Loss coefficients
snake_case__ : List[str] = mask_loss_coefficient
snake_case__ : Optional[Any] = dice_loss_coefficient
snake_case__ : Optional[Any] = bbox_loss_coefficient
snake_case__ : List[Any] = giou_loss_coefficient
snake_case__ : Optional[int] = eos_coefficient
super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def __UpperCamelCase ( self ):
return self.encoder_attention_heads
@property
def __UpperCamelCase ( self ):
return self.d_model
@classmethod
def __UpperCamelCase ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return cls(backbone_config=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case__ : Union[str, Any] = self.backbone_config.to_dict()
snake_case__ : Tuple = self.__class__.model_type
return output
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-5
@property
def __UpperCamelCase ( self ):
return 1_2
| 38 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
| 38 | 1 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = ['''image_processor''']
lowerCamelCase__ = '''SamImageProcessor'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
super().__init__(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = self.image_processor
snake_case__ : List[Any] = -1_0
snake_case__ : Any = self.image_processor.size["""longest_edge"""]
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
snake_case__ : Dict = self.image_processor(
__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        # pop arguments that are not used in the forward pass but are needed nevertheless
snake_case__ : Union[str, Any] = encoding_image_processor["""original_sizes"""]
if hasattr(__SCREAMING_SNAKE_CASE , """numpy""" ): # Checks if Torch or TF tensor
snake_case__ : Optional[int] = original_sizes.numpy()
snake_case__ , snake_case__ , snake_case__ : Dict = self._check_and_preprocess_points(
input_points=__SCREAMING_SNAKE_CASE , input_labels=__SCREAMING_SNAKE_CASE , input_boxes=__SCREAMING_SNAKE_CASE , )
snake_case__ : Optional[Any] = self._normalize_and_convert(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , input_points=__SCREAMING_SNAKE_CASE , input_labels=__SCREAMING_SNAKE_CASE , input_boxes=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , )
return encoding_image_processor
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , ):
if input_points is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = [
self._normalize_coordinates(self.target_size , __SCREAMING_SNAKE_CASE , original_sizes[0] ) for point in input_points
]
else:
snake_case__ : Any = [
self._normalize_coordinates(self.target_size , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for point, original_size in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
snake_case__ , snake_case__ : int = self._pad_points_and_labels(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = np.array(__SCREAMING_SNAKE_CASE )
if input_labels is not None:
snake_case__ : Tuple = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
snake_case__ : int = [
self._normalize_coordinates(self.target_size , __SCREAMING_SNAKE_CASE , original_sizes[0] , is_bounding_box=__SCREAMING_SNAKE_CASE )
for box in input_boxes
]
else:
snake_case__ : int = [
self._normalize_coordinates(self.target_size , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , is_bounding_box=__SCREAMING_SNAKE_CASE )
for box, original_size in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
]
snake_case__ : List[str] = np.array(__SCREAMING_SNAKE_CASE )
if input_boxes is not None:
if return_tensors == "pt":
snake_case__ : Union[str, Any] = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
snake_case__ : Any = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
snake_case__ : int = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# boxes batch size of 1 by default
snake_case__ : List[str] = tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
snake_case__ : Optional[Any] = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
snake_case__ : List[Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
snake_case__ : List[Any] = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
snake_case__ : Union[str, Any] = tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
snake_case__ : Any = torch.from_numpy(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
snake_case__ : Optional[Any] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
snake_case__ : Optional[Any] = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE )
# point batch size of 1 by default
snake_case__ : str = tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = max([point.shape[0] for point in input_points] )
snake_case__ : List[str] = []
for i, point in enumerate(__SCREAMING_SNAKE_CASE ):
if point.shape[0] != expected_nb_points:
snake_case__ : Optional[int] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
snake_case__ : Dict = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(__SCREAMING_SNAKE_CASE )
snake_case__ : str = processed_input_points
return input_points, input_labels
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
snake_case__ , snake_case__ : Any = original_size
snake_case__ , snake_case__ : str = self.image_processor._get_preprocess_shape(__SCREAMING_SNAKE_CASE , longest_edge=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = deepcopy(__SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE )
if is_bounding_box:
snake_case__ : Dict = coords.reshape(-1 , 2 , 2 )
snake_case__ : str = coords[..., 0] * (new_w / old_w)
snake_case__ : Union[str, Any] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
snake_case__ : Tuple = coords.reshape(-1 , 4 )
return coords
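    # Worked example for the rescaling above (illustrative numbers): with original_size
    # (old_h, old_w) = (600, 800) and longest_edge 1024, _get_preprocess_shape should give
    # (new_h, new_w) = (768, 1024), so a point (x=400, y=300) maps to
    # (400 * 1024 / 800, 300 * 768 / 600) = (512.0, 384.0).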
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ):
if input_points is not None:
if hasattr(__SCREAMING_SNAKE_CASE , """numpy""" ): # Checks for TF or Torch tensor
snake_case__ : int = input_points.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0] , __SCREAMING_SNAKE_CASE ):
raise ValueError("""Input points must be a list of list of floating points.""" )
snake_case__ : Optional[Any] = [np.array(__SCREAMING_SNAKE_CASE ) for input_point in input_points]
else:
snake_case__ : Any = None
if input_labels is not None:
if hasattr(__SCREAMING_SNAKE_CASE , """numpy""" ):
snake_case__ : int = input_labels.numpy().tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0] , __SCREAMING_SNAKE_CASE ):
raise ValueError("""Input labels must be a list of list integers.""" )
snake_case__ : Union[str, Any] = [np.array(__SCREAMING_SNAKE_CASE ) for label in input_labels]
else:
snake_case__ : str = None
if input_boxes is not None:
if hasattr(__SCREAMING_SNAKE_CASE , """numpy""" ):
snake_case__ : int = input_boxes.numpy().tolist()
if (
not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0] , __SCREAMING_SNAKE_CASE )
or not isinstance(input_boxes[0][0] , __SCREAMING_SNAKE_CASE )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
snake_case__ : Union[str, Any] = [np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes]
else:
snake_case__ : Any = None
return input_points, input_labels, input_boxes
@property
def __UpperCamelCase ( self ):
snake_case__ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return self.image_processor.post_process_masks(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
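    # A minimal call sketch for this processor (the checkpoint name and the SAM point
    # format [[[x, y]]] are assumptions based on the public SAM docs; illustrative only):
    #
    #   from transformers import SamProcessor
    #   from PIL import Image
    #   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    #   inputs = processor(Image.open("img.png"), input_points=[[[450, 600]]], return_tensors="pt")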
| 38 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda __magic_name__ : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda __magic_name__ : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
A_ : Any = make_github_table(reduced_by_error)
A_ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 38 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> Tuple:
'''simple docstring'''
if not head:
return True
# split the list to two parts
snake_case__ , snake_case__ : Dict = head.next, head
while fast and fast.next:
snake_case__ : Any = fast.next.next
snake_case__ : int = slow.next
snake_case__ : Dict = slow.next
    snake_case__ : List[str] = None # detach the first half from the second; the comparison below works even without this, but it keeps the halves cleanly split
# reverse the second part
snake_case__ : Tuple = None
while second:
snake_case__ : Tuple = second.next
snake_case__ : Any = node
snake_case__ : str = second
snake_case__ : Optional[Any] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
snake_case__ : List[Any] = node.next
snake_case__ : int = head.next
return True
def UpperCamelCase__ ( __magic_name__ : Any ) -> Optional[Any]:
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
    snake_case__ , snake_case__ : List[Any] = head, head # both the fast and slow pointers start from the head
while fast and fast.next:
snake_case__ , snake_case__ : Any = fast.next.next, slow.next
# 2. Push the second half into the stack
snake_case__ : Tuple = [slow.val]
while slow.next:
snake_case__ : Optional[Any] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
snake_case__ : str = cur.next
return True
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
if not head or not head.next:
return True
snake_case__ : int = {}
snake_case__ : Union[str, Any] = 0
while head:
if head.val in d:
d[head.val].append(__magic_name__ )
else:
snake_case__ : Tuple = [pos]
snake_case__ : Optional[Any] = head.next
pos += 1
snake_case__ : int = pos - 1
snake_case__ : str = 0
for v in d.values():
if len(__magic_name__ ) % 2 != 0:
middle += 1
else:
snake_case__ : List[str] = 0
for i in range(0 , len(__magic_name__ ) ):
if v[i] + v[len(__magic_name__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
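# Illustrative sketch of the node shape the three checks above expect, inferred from the
# `.val` / `.next` accesses (hypothetical class; in de-obfuscated form each function takes
# the head of such a list and returns True for 1 -> 2 -> 2 -> 1, False for 1 -> 2 -> 3):
#
#   class ListNode:
#       def __init__(self, val, nxt=None):
#           self.val = val
#           self.next = nxt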
| 38 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[Any] = 0
snake_case__ : Union[str, Any] = dataset_name
snake_case__ : Optional[int] = cache_dir
snake_case__ : Union[str, Any] = use_local_dummy_data
snake_case__ : int = config
# download_callbacks take a single url as input
snake_case__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
snake_case__ : List[str] = None
snake_case__ : List[str] = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 38 | 1 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( __magic_name__ : list ) -> float:
'''simple docstring'''
    if not __magic_name__ :
raise ValueError("""List is empty""" )
return sum(__magic_name__ ) / len(__magic_name__ )
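# A quick sanity check of the mean helper above (illustrative values):
assert UpperCamelCase__([1, 2, 3, 4]) == 2.5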
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 38 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = (DPMSolverSinglestepScheduler,)
lowerCamelCase__ = (('''num_inference_steps''', 25),)
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ):
snake_case__ : str = dict(self.forward_default_kwargs )
snake_case__ : Optional[int] = kwargs.pop("""num_inference_steps""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.dummy_sample
snake_case__ : Dict = 0.1 * sample
snake_case__ : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case__ : Dict = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
snake_case__ : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
snake_case__ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case__ , snake_case__ : Dict = sample, sample
for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
snake_case__ : List[Any] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
snake_case__ : Optional[int] = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = dict(self.forward_default_kwargs )
snake_case__ : Tuple = kwargs.pop("""num_inference_steps""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.dummy_sample
snake_case__ : Tuple = 0.1 * sample
snake_case__ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case__ : List[str] = self.get_scheduler_config()
snake_case__ : Union[str, Any] = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
snake_case__ : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
snake_case__ : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case__ : str = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
snake_case__ : Dict = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
if scheduler is None:
snake_case__ : Any = self.scheduler_classes[0]
snake_case__ : List[str] = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = scheduler_class(**__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.scheduler_classes[0]
snake_case__ : Optional[Any] = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = scheduler_class(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : Union[str, Any] = self.dummy_model()
snake_case__ : str = self.dummy_sample_deter
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : int = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
snake_case__ : str = 5_0
snake_case__ : str = self.dummy_model()
snake_case__ : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
snake_case__ : Any = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
snake_case__ : Union[str, Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2574 ) < 1e-3
def __UpperCamelCase ( self ):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
snake_case__ : Optional[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
snake_case__ : List[Any] = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
snake_case__ : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
snake_case__ : List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
snake_case__ : List[str] = UniPCMultistepScheduler.from_config(scheduler.config )
snake_case__ : Union[str, Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
snake_case__ : List[str] = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def __UpperCamelCase ( self ):
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type="""dpmsolver++""" , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
snake_case__ : Tuple = self.full_loop(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def __UpperCamelCase ( self ):
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def __UpperCamelCase ( self ):
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
self.check_over_configs(variance_type="""learned_range""" )
def __UpperCamelCase ( self ):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.full_loop()
snake_case__ : str = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.full_loop(use_karras_sigmas=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2248 ) < 1e-3
def __UpperCamelCase ( self ):
snake_case__ : List[str] = self.full_loop(prediction_type="""v_prediction""" )
snake_case__ : str = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1453 ) < 1e-3
def __UpperCamelCase ( self ):
snake_case__ : int = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.0649 ) < 1e-3
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : str = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
snake_case__ : List[str] = scheduler_class(**__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = 1_0
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Any = self.dummy_sample_deter.half()
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : Tuple = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.floataa
| 38 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Dict = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None , __magic_name__ : List[str]=None ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = True
while ask_again:
snake_case__ : Optional[Any] = input(__magic_name__ )
try:
if default is not None and len(__magic_name__ ) == 0:
return default
return convert_value(__magic_name__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Any=[] , __magic_name__ : Optional[int]=None , __magic_name__ : int=0 ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = BulletMenu(__magic_name__ , __magic_name__ )
snake_case__ : Optional[Any] = menu.run(default_choice=__magic_name__ )
return convert_value(__magic_name__ ) if convert_value is not None else result
def UpperCamelCase__ ( __magic_name__ : Any ) -> int:
'''simple docstring'''
snake_case__ : Tuple = int(__magic_name__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase__ ( __magic_name__ : str ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = int(__magic_name__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = int(__magic_name__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Tuple:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __snake_case ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = super()._format_usage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = usage.replace("""<command> [<args>] """ , """""" )
return usage
| 38 | 1 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
A_ : Optional[int] = True
except (ImportError, ModuleNotFoundError):
A_ : List[Any] = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
re.sub("""<n>""" , """""" , __magic_name__ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__magic_name__ ) )
| 38 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( __magic_name__ : list ) -> float:
'''simple docstring'''
    if not __magic_name__ :
raise ValueError("""List is empty""" )
return sum(__magic_name__ ) / len(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
A_ : Union[str, Any] = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def UpperCamelCase__ ( __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int=None ) -> str:
'''simple docstring'''
snake_case__ : Tuple = XLNetConfig.from_json_file(__magic_name__ )
snake_case__ : str = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
snake_case__ : Any = finetuning_task
snake_case__ : List[Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
snake_case__ : Optional[Any] = XLNetForSequenceClassification(__magic_name__ )
elif "squad" in finetuning_task:
snake_case__ : int = finetuning_task
snake_case__ : str = XLNetForQuestionAnswering(__magic_name__ )
else:
snake_case__ : List[str] = XLNetLMHeadModel(__magic_name__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__magic_name__ , __magic_name__ , __magic_name__ )
# Save pytorch-model
snake_case__ : List[str] = os.path.join(__magic_name__ , __magic_name__ )
snake_case__ : Tuple = os.path.join(__magic_name__ , __magic_name__ )
print(f"Save PyTorch model to {os.path.abspath(__magic_name__ )}" )
torch.save(model.state_dict() , __magic_name__ )
print(f"Save configuration file to {os.path.abspath(__magic_name__ )}" )
with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
A_ : Union[str, Any] = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 38 |
'''simple docstring'''
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
super().__init__()
snake_case__ : list[str] = []
snake_case__ : List[Any] = domain
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
            # If href is defined and is neither empty nor just "#", record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
snake_case__ : str = parse.urljoin(self.domain , __SCREAMING_SNAKE_CASE )
self.urls.append(__SCREAMING_SNAKE_CASE )
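# For example, feeding '<a href="/about">About</a>' to a parser built with the domain
# "https://example.com" would append "https://example.com/about" to `urls` (illustrative
# values; `urljoin` resolves the relative href against the stored domain).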
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return ".".join(get_sub_domain_name(__magic_name__ ).split(""".""" )[-2:] )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return parse.urlparse(__magic_name__ ).netloc
def UpperCamelCase__ ( __magic_name__ : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
snake_case__ : List[str] = get_domain_name(__magic_name__ )
# Initialize the parser
snake_case__ : Optional[Any] = Parser(__magic_name__ )
try:
# Open URL
snake_case__ : Any = requests.get(__magic_name__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ : List[str] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case__ : Tuple = requests.get(__magic_name__ )
# Extract any valid email addresses.
snake_case__ : List[str] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__magic_name__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__magic_name__ )
if __name__ == "__main__":
A_ : str = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
| 38 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
A_ : Any = logging.getLogger(__name__)
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE = False , ):
snake_case__ : Dict = hans_processors[task]()
snake_case__ : Tuple = os.path.join(
__SCREAMING_SNAKE_CASE , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , ) , )
snake_case__ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case__ , snake_case__ : List[str] = label_list[2], label_list[1]
snake_case__ : List[str] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case__ : Optional[Any] = cached_features_file + """.lock"""
with FileLock(__SCREAMING_SNAKE_CASE ):
if os.path.exists(__SCREAMING_SNAKE_CASE ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
snake_case__ : str = torch.load(__SCREAMING_SNAKE_CASE )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
snake_case__ : List[Any] = (
processor.get_dev_examples(__SCREAMING_SNAKE_CASE ) if evaluate else processor.get_train_examples(__SCREAMING_SNAKE_CASE )
)
logger.info("""Training examples: %s""" , len(__SCREAMING_SNAKE_CASE ) )
snake_case__ : List[Any] = hans_convert_examples_to_features(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info("""Saving features into cached file %s""" , __SCREAMING_SNAKE_CASE )
torch.save(self.features , __SCREAMING_SNAKE_CASE )
def __len__( self ):
return len(self.features )
def __getitem__( self , __SCREAMING_SNAKE_CASE ):
return self.features[i]
def __UpperCamelCase ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = 42
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1_2_8 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE = False , ):
snake_case__ : Tuple = hans_processors[task]()
snake_case__ : Any = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case__ , snake_case__ : List[Any] = label_list[2], label_list[1]
snake_case__ : int = label_list
snake_case__ : Union[str, Any] = processor.get_dev_examples(__SCREAMING_SNAKE_CASE ) if evaluate else processor.get_train_examples(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = hans_convert_examples_to_features(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(__SCREAMING_SNAKE_CASE )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
snake_case__ : Optional[int] = tf.data.Dataset.from_generator(
__SCREAMING_SNAKE_CASE , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def __UpperCamelCase ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self , __SCREAMING_SNAKE_CASE ):
return self.features[i]
def __UpperCamelCase ( self ):
return self.label_list
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return self._create_examples(self._read_tsv(os.path.join(__SCREAMING_SNAKE_CASE , """heuristics_train_set.txt""" ) ) , """train""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return self._create_examples(self._read_tsv(os.path.join(__SCREAMING_SNAKE_CASE , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def __UpperCamelCase ( self ):
return ["contradiction", "entailment", "neutral"]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = []
for i, line in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
continue
snake_case__ : int = """%s-%s""" % (set_type, line[0])
snake_case__ : Optional[Any] = line[5]
snake_case__ : int = line[6]
snake_case__ : Union[str, Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
snake_case__ : Optional[Any] = line[0]
examples.append(InputExample(guid=__SCREAMING_SNAKE_CASE , text_a=__SCREAMING_SNAKE_CASE , text_b=__SCREAMING_SNAKE_CASE , label=__SCREAMING_SNAKE_CASE , pairID=__SCREAMING_SNAKE_CASE ) )
return examples
def UpperCamelCase__ ( __magic_name__ : List[InputExample] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : PreTrainedTokenizer , ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Optional[int] = {label: i for i, label in enumerate(__magic_name__ )}
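# e.g. with HANS's labels ["contradiction", "entailment", "neutral"] this
# yields {"contradiction": 0, "entailment": 1, "neutral": 2}.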
snake_case__ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(__magic_name__ ) , desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d""" % (ex_index) )
snake_case__ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=__magic_name__ , max_length=__magic_name__ , padding="""max_length""" , truncation=__magic_name__ , return_overflowing_tokens=__magic_name__ , )
snake_case__ : Tuple = label_map[example.label] if example.label in label_map else 0
snake_case__ : Tuple = int(example.pairID )
features.append(InputFeatures(**__magic_name__ , label=__magic_name__ , pairID=__magic_name__ ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"guid: {example}" )
logger.info(f"features: {features[i]}" )
return features
A_ : int = {
"hans": 3,
}
A_ : Tuple = {
"hans": HansProcessor,
}
| 38 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> Tuple:
'''simple docstring'''
if not head:
return True
# split the list to two parts
snake_case__ , snake_case__ : Dict = head.next, head
while fast and fast.next:
snake_case__ : Any = fast.next.next
snake_case__ : int = slow.next
snake_case__ : Dict = slow.next
snake_case__ : List[str] = None # detach the first half here (the comparison below still works without this)
# reverse the second part
snake_case__ : Tuple = None
while second:
snake_case__ : Tuple = second.next
snake_case__ : Any = node
snake_case__ : str = second
snake_case__ : Optional[Any] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
snake_case__ : List[Any] = node.next
snake_case__ : int = head.next
return True
def UpperCamelCase__ ( __magic_name__ : Any ) -> Optional[Any]:
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
snake_case__ = snake_case__ = snake_case__ = head # slow = fast = cur = head: all three pointers start at the head
while fast and fast.next:
snake_case__ , snake_case__ : Any = fast.next.next, slow.next
# 2. Push the second half into the stack
snake_case__ : Tuple = [slow.val]
while slow.next:
snake_case__ : Optional[Any] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
snake_case__ : str = cur.next
return True
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
if not head or not head.next:
return True
snake_case__ : int = {}
snake_case__ : Union[str, Any] = 0
while head:
if head.val in d:
d[head.val].append(__magic_name__ )
else:
snake_case__ : Tuple = [pos]
snake_case__ : Optional[Any] = head.next
pos += 1
snake_case__ : int = pos - 1
snake_case__ : str = 0
for v in d.values():
if len(__magic_name__ ) % 2 != 0:
middle += 1
else:
snake_case__ : List[str] = 0
for i in range(0 , len(__magic_name__ ) ):
if v[i] + v[len(__magic_name__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
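# The three variants above are hard to follow once every local is renamed, so
# here is a minimal runnable sketch of the first (reverse-second-half) method.
# The node class and all names below are illustrative, not from the original.
class _Node:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt
def _is_palindrome(head):
    if not head or not head.next:
        return True
    # 1. advance fast two steps per slow step to find the middle
    slow = fast = head
    while fast and fast.next:
        slow, fast = slow.next, fast.next.next
    # 2. reverse the second half in place
    prev = None
    while slow:
        slow.next, prev, slow = prev, slow, slow.next
    # 3. compare halves (the second has equal length or one fewer node)
    left, right = head, prev
    while right:
        if left.val != right.val:
            return False
        left, right = left.next, right.next
    return True
assert _is_palindrome(_Node(1, _Node(2, _Node(2, _Node(1)))))
assert not _is_palindrome(_Node(1, _Node(2, _Node(3))))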
| 38 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : Dict = logging.get_logger(__name__)
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''maskformer-swin'''
lowerCamelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=2_2_4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=9_6 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 1_2, 2_4] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
snake_case__ : str = image_size
snake_case__ : Tuple = patch_size
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : List[str] = depths
snake_case__ : Tuple = len(__SCREAMING_SNAKE_CASE )
snake_case__ : int = num_heads
snake_case__ : int = window_size
snake_case__ : List[str] = mlp_ratio
snake_case__ : Union[str, Any] = qkv_bias
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : Dict = drop_path_rate
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Dict = use_absolute_embeddings
snake_case__ : str = layer_norm_eps
snake_case__ : int = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case__ : str = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
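# e.g. with the defaults embed_dim=96 and depths=[2, 2, 6, 2] (four stages),
# hidden_size = 96 * 2 ** 3 = 768, the channel width after the last stage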
snake_case__ : Optional[Any] = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Dict = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 38 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ : str = 250004
A_ : str = 250020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = MBartTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case__ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case__ : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : int = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : Tuple = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case__ : Any = tempfile.mkdtemp()
snake_case__ : Optional[int] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : List[Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case__ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = '''facebook/mbart-large-en-ro'''
lowerCamelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase__ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
snake_case__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
snake_case__ : Any = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
snake_case__ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case__ : List[Any] = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : int = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
snake_case__ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = MBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
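# A framework-free sketch of the behavior the assertions above rely on:
# MBart's shift_tokens_right wraps the final token (the language code, which
# follows EOS) around to position 0. Plain-list version that deliberately
# ignores padding; names and token values below are illustrative.
def _shift_right_nopad(ids):
    return [ids[-1]] + ids[:-1]
assert _shift_right_nopad([8_84, 90_19, 5, 2, 250_020]) == [250_020, 8_84, 90_19, 5, 2]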
| 38 | 1 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
A_ , A_ , A_ : Union[str, Any] = False, False, False
@dataclass
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = None
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = None
# Automatically constructed
lowerCamelCase__ = "dict"
lowerCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCamelCase__ = field(default='''Audio''' , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ):
return self.pa_type
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return {"bytes": None, "path": value}
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
snake_case__ : Tuple = BytesIO()
sf.write(__SCREAMING_SNAKE_CASE , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# Converting raw PCM bytes to WAV bytes requires knowing the sampling rate.
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already have the PCM bytes, use them directly instead of reading the file.
snake_case__ : List[str] = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
snake_case__ : Tuple = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 3_2_7_6_7
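# In both branches above, 16-bit PCM samples (range [-32768, 32767]) are
# rescaled to float32 values in roughly [-1.0, 1.0] by dividing by 32767.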
snake_case__ : str = BytesIO(bytes() )
sf.write(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
snake_case__ , snake_case__ : str = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
snake_case__ : Optional[Any] = xsplitext(__SCREAMING_SNAKE_CASE )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
snake_case__ : str = token_per_repo_id or {}
snake_case__ : Tuple = path.split("""::""" )[-1]
try:
snake_case__ : str = string_to_dict(__SCREAMING_SNAKE_CASE , config.HUB_DATASETS_URL )["""repo_id"""]
snake_case__ : int = token_per_repo_id[repo_id]
except (ValueError, KeyError):
snake_case__ : Dict = None
with xopen(__SCREAMING_SNAKE_CASE , """rb""" , use_auth_token=__SCREAMING_SNAKE_CASE ) as f:
snake_case__ , snake_case__ : Optional[int] = sf.read(__SCREAMING_SNAKE_CASE )
else:
snake_case__ , snake_case__ : Tuple = sf.read(__SCREAMING_SNAKE_CASE )
snake_case__ : str = array.T
if self.mono:
snake_case__ : str = librosa.to_mono(__SCREAMING_SNAKE_CASE )
if self.sampling_rate and self.sampling_rate != sampling_rate:
snake_case__ : List[Any] = librosa.resample(__SCREAMING_SNAKE_CASE , orig_sr=__SCREAMING_SNAKE_CASE , target_sr=self.sampling_rate )
snake_case__ : List[str] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __UpperCamelCase ( self ):
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if pa.types.is_string(storage.type ):
snake_case__ : List[str] = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.binary() )
snake_case__ : Tuple = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
snake_case__ : List[str] = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.string() )
snake_case__ : List[str] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
snake_case__ : Dict = pa.array([Audio().encode_example(__SCREAMING_SNAKE_CASE ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
snake_case__ : Tuple = storage.field("""bytes""" )
else:
snake_case__ : Any = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
snake_case__ : List[Any] = storage.field("""path""" )
else:
snake_case__ : Union[str, Any] = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.string() )
snake_case__ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(__SCREAMING_SNAKE_CASE , self.pa_type )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
@no_op_if_value_is_null
def path_to_bytes(__SCREAMING_SNAKE_CASE ):
with xopen(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
snake_case__ : int = f.read()
return bytes_
snake_case__ : Optional[int] = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
snake_case__ : Optional[Any] = pa.array(
[os.path.basename(__SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
snake_case__ : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__SCREAMING_SNAKE_CASE , self.pa_type )
| 38 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''bit'''
lowerCamelCase__ = ['''preactivation''', '''bottleneck''']
lowerCamelCase__ = ['''SAME''', '''VALID''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="preactivation" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case__ : Tuple = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
snake_case__ : List[str] = num_channels
snake_case__ : Tuple = embedding_size
snake_case__ : str = hidden_sizes
snake_case__ : Optional[Any] = depths
snake_case__ : List[Any] = layer_type
snake_case__ : Dict = hidden_act
snake_case__ : Union[str, Any] = global_padding
snake_case__ : List[str] = num_groups
snake_case__ : str = drop_path_rate
snake_case__ : List[Any] = embedding_dynamic_padding
snake_case__ : List[str] = output_stride
snake_case__ : Dict = width_factor
snake_case__ : List[str] = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Dict = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 38 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def UpperCamelCase__ ( __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : str , __magic_name__ : Path , __magic_name__ : str = None , __magic_name__ : str = None , __magic_name__ : str = None , ) -> Optional[Any]:
'''simple docstring'''
if config_name_or_path is None:
snake_case__ : List[str] = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base"""
if generator_tokenizer_name_or_path is None:
snake_case__ : Optional[int] = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
snake_case__ : Union[str, Any] = question_encoder_name_or_path
snake_case__ : Union[str, Any] = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration
# Save model.
snake_case__ : Tuple = RagConfig.from_pretrained(__magic_name__ )
snake_case__ : int = AutoConfig.from_pretrained(__magic_name__ )
snake_case__ : List[str] = AutoConfig.from_pretrained(__magic_name__ )
snake_case__ : Optional[int] = gen_config
snake_case__ : str = question_encoder_config
snake_case__ : Any = model_class.from_pretrained_question_encoder_generator(
__magic_name__ , __magic_name__ , config=__magic_name__ )
rag_model.save_pretrained(__magic_name__ )
# Sanity check.
model_class.from_pretrained(__magic_name__ )
# Save tokenizers.
snake_case__ : int = AutoTokenizer.from_pretrained(__magic_name__ )
gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" )
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(__magic_name__ )
question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" )
if __name__ == "__main__":
A_ : int = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
A_ : Tuple = parser.parse_args()
A_ : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 38 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=False ) -> Tuple:
'''simple docstring'''
snake_case__ : int = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Tuple=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : int = """"""
else:
snake_case__ : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case__ : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : Optional[Any] = in_proj_bias[: config.hidden_size]
snake_case__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Optional[int] = in_proj_bias[-config.hidden_size :]
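# A hedged sketch of the slicing above: timm stores q, k and v as one fused
# (3*hidden, hidden) weight, and rows [:H], [H:2H] and [-H:] recover the three
# separate projections. The numpy demo and its names are illustrative only.
import numpy as np
_H = 4
_qkv = np.arange(3 * _H * _H).reshape(3 * _H, _H)
_q, _k, _v = _qkv[:_H], _qkv[_H : 2 * _H], _qkv[-_H:]
assert _q.shape == _k.shape == _v.shape == (_H, _H)
assert (np.vstack([_q, _k, _v]) == _qkv).all()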
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case__ : str = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = dct.pop(__magic_name__ )
snake_case__ : Dict = val
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int=False ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=__magic_name__ , )
snake_case__ : Optional[int] = ViTHybridConfig(backbone_config=__magic_name__ , image_size=3_84 , num_labels=10_00 )
snake_case__ : Union[str, Any] = False
# load original model from timm
snake_case__ : List[Any] = timm.create_model(__magic_name__ , pretrained=__magic_name__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ : Optional[int] = timm_model.state_dict()
if base_model:
remove_classification_head_(__magic_name__ )
snake_case__ : int = create_rename_keys(__magic_name__ , __magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , __magic_name__ )
snake_case__ : str = """huggingface/label-files"""
snake_case__ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case__ : Dict = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : List[Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case__ : int = idalabel
snake_case__ : str = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ : str = ViTHybridModel(__magic_name__ ).eval()
else:
snake_case__ : Union[str, Any] = ViTHybridForImageClassification(__magic_name__ ).eval()
model.load_state_dict(__magic_name__ )
# create image processor
snake_case__ : Optional[Any] = create_transform(**resolve_data_config({} , model=__magic_name__ ) )
snake_case__ : Union[str, Any] = transform.transforms
snake_case__ : Tuple = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case__ : Any = ViTHybridImageProcessor(
do_resize=__magic_name__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__magic_name__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__magic_name__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case__ : Any = prepare_img()
snake_case__ : int = transform(__magic_name__ ).unsqueeze(0 )
snake_case__ : List[str] = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__magic_name__ , __magic_name__ )
# verify logits
with torch.no_grad():
snake_case__ : Optional[Any] = model(__magic_name__ )
snake_case__ : Union[str, Any] = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
snake_case__ : Dict = timm_model.forward_features(__magic_name__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__magic_name__ , outputs.pooler_output , atol=1E-3 )
else:
snake_case__ : int = timm_model(__magic_name__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__magic_name__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 38 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
A_ : Optional[Any] = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
A_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE=True , ):
super().__init__()
snake_case__ : str = layers_per_block
snake_case__ : int = torch.nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : List[Any] = None
snake_case__ : List[Any] = nn.ModuleList([] )
# down
snake_case__ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = output_channel
snake_case__ : Union[str, Any] = block_out_channels[i]
snake_case__ : int = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : str = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
snake_case__ : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# out
snake_case__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : str = 2 * out_channels if double_z else out_channels
snake_case__ : int = nn.Convad(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : Union[str, Any] = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = x
snake_case__ : int = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
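# `create_custom_forward` pins each block in a closure so torch.utils.checkpoint
# can re-run it during the backward pass, trading extra compute for lower
# activation memory.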
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
snake_case__ : List[str] = down_block(__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
snake_case__ : Any = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE="group" , ):
super().__init__()
snake_case__ : Any = layers_per_block
snake_case__ : Optional[Any] = nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : Optional[int] = in_channels if norm_type == """spatial""" else None
# mid
snake_case__ : Tuple = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# up
snake_case__ : List[Any] = list(reversed(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = output_channel
snake_case__ : Optional[Any] = reversed_block_out_channels[i]
snake_case__ : List[str] = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : int = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : int = output_channel
# out
if norm_type == "spatial":
snake_case__ : List[Any] = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
else:
snake_case__ : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : Union[str, Any] = nn.Convad(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : int = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Union[str, Any] = z
snake_case__ : Any = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
snake_case__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
snake_case__ : int = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : List[Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : Dict = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
snake_case__ : Optional[Any] = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="random" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ):
super().__init__()
snake_case__ : int = n_e
snake_case__ : Optional[int] = vq_embed_dim
snake_case__ : int = beta
snake_case__ : Optional[int] = legacy
snake_case__ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case__ : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
snake_case__ : Optional[Any] = self.used.shape[0]
snake_case__ : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case__ : Dict = self.re_embed
snake_case__ : List[str] = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
snake_case__ : Union[str, Any] = n_e
snake_case__ : str = sane_index_shape
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : Dict = inds.reshape(ishape[0] , -1 )
snake_case__ : Any = self.used.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
snake_case__ : List[Any] = match.argmax(-1 )
snake_case__ : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case__ : Optional[Any] = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : int = inds.reshape(ishape[0] , -1 )
snake_case__ : Optional[int] = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
snake_case__ : List[Any] = 0 # simply set to zero
snake_case__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
# reshape z -> (batch, height, width, channel) and flatten
snake_case__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case__ : Dict = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
snake_case__ : Union[str, Any] = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
snake_case__ : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
snake_case__ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case__ : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case__ : str = self.remap_to_used(__SCREAMING_SNAKE_CASE )
snake_case__ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case__ : Optional[int] = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case__ : int = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
snake_case__ : str = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
snake_case__ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
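# Hedged, self-contained sketch of the two ideas the quantizer above relies
# on: nearest-neighbour lookup into a codebook and the straight-through
# gradient estimator. `codebook` and the shapes are illustrative only.
def _vector_quantize_sketch():
    import torch
    codebook = torch.randn(8, 4)                 # 8 code vectors of dim 4
    z = torch.randn(3, 4, requires_grad=True)    # 3 flattened latents
    indices = torch.argmin(torch.cdist(z, codebook), dim=1)
    z_q = codebook[indices]
    # straight-through: forward uses z_q, backward routes gradients to z
    z_q = z + (z_q - z).detach()
    return z_q, indices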
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
snake_case__ : Tuple = parameters
snake_case__ , snake_case__ : Any = torch.chunk(__SCREAMING_SNAKE_CASE , 2 , dim=1 )
snake_case__ : Union[str, Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
snake_case__ : Optional[int] = deterministic
snake_case__ : Optional[int] = torch.exp(0.5 * self.logvar )
snake_case__ : Any = torch.exp(self.logvar )
if self.deterministic:
snake_case__ : List[str] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None ):
        # make sure sample is on the same device as the parameters and has the same dtype
snake_case__ : Dict = randn_tensor(
self.mean.shape , generator=__SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
snake_case__ : Optional[int] = self.mean + self.std * sample
return x
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
snake_case__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.mean
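# Minimal sketch (names illustrative) of the reparameterisation trick and the
# closed-form KL to a standard normal that the distribution class above
# implements: KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2).
def _diagonal_gaussian_sketch():
    import torch
    mean, logvar = torch.zeros(2, 4), torch.zeros(2, 4)
    std = torch.exp(0.5 * logvar)
    sample = mean + std * torch.randn_like(std)  # x = mu + sigma * eps
    kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=1)
    return sample, kl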
| 38 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Tuple = 1
while len(__magic_name__ ) < 1E6:
constant.append(str(__magic_name__ ) )
i += 1
snake_case__ : Optional[Any] = """""".join(__magic_name__ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
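# Illustrative helper (not part of the original solution): the constant built
# above is Champernowne's 0.123456789101112..., so indexing its digit string
# at 0, 9, 99, ... picks the 1st, 10th, 100th, ... digits.
def _champernowne_prefix(n: int = 15) -> str:
    digits, i = [], 1
    while len("".join(digits)) < n:
        digits.append(str(i))
        i += 1
    return "".join(digits)[:n]  # '123456789101112' for n = 15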
if __name__ == "__main__":
print(solution())
| 38 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1, 3_8_4, 2_4, 2_4] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : str = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : Optional[int] = patch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : str = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : str = backbone_out_indices
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : Optional[int] = num_labels
snake_case__ : str = backbone_featmap_shape
snake_case__ : List[Any] = scope
snake_case__ : Optional[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
snake_case__ : List[Any] = (image_size // patch_size) ** 2
snake_case__ : Union[str, Any] = num_patches + 1
def __UpperCamelCase ( self ):
snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : str = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
snake_case__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [9_6, 1_9_2, 3_8_4, 7_6_8],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = DPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : Dict = DPTForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = DPTModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = False
snake_case__ : str = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
snake_case__ : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case__ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case__ : List[str] = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = """add"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
snake_case__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = outputs.predicted_depth
# verify the predicted depth
snake_case__ : Any = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
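# Illustrative sketch of the slice-and-compare pattern used by the integration
# test above: check a small corner of a large output against hard-coded
# reference values within an absolute tolerance. The values here are made up.
def _allclose_slice_sketch():
    import torch
    predicted = torch.full((1, 384, 384), 5.5)
    expected_corner = torch.full((3, 3), 5.5)
    assert torch.allclose(predicted[0, :3, :3], expected_corner, atol=1e-4)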
| 38 | 1 |
'''simple docstring'''
from manim import *
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Tuple = Rectangle(height=0.5 , width=0.5 )
snake_case__ : Optional[Any] = Rectangle(height=0.25 , width=0.25 )
snake_case__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case__ : Optional[int] = [mem.copy() for i in range(6 )]
snake_case__ : Tuple = [mem.copy() for i in range(6 )]
snake_case__ : Optional[Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
snake_case__ : Optional[Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
snake_case__ : Union[str, Any] = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
snake_case__ : Any = Text("""CPU""" , font_size=2_4 )
snake_case__ : Dict = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = [mem.copy() for i in range(4 )]
snake_case__ : Optional[int] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
snake_case__ : str = Text("""GPU""" , font_size=2_4 )
snake_case__ : Tuple = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = [mem.copy() for i in range(6 )]
snake_case__ : Union[str, Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
snake_case__ : Union[str, Any] = Text("""Model""" , font_size=2_4 )
snake_case__ : List[Any] = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(__SCREAMING_SNAKE_CASE )
snake_case__ : str = []
snake_case__ : List[str] = []
snake_case__ : Any = []
for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
rect.set_stroke(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__SCREAMING_SNAKE_CASE , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__SCREAMING_SNAKE_CASE )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__SCREAMING_SNAKE_CASE , buff=0.0 )
self.add(__SCREAMING_SNAKE_CASE )
model_cpu_arr.append(__SCREAMING_SNAKE_CASE )
self.add(*__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = [mem.copy() for i in range(6 )]
snake_case__ : Optional[Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
snake_case__ : List[Any] = Text("""Loaded Checkpoint""" , font_size=2_4 )
snake_case__ : Any = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
checkpoint.move_to([3, 0.5, 0] )
self.add(__SCREAMING_SNAKE_CASE )
snake_case__ : str = []
snake_case__ : int = []
for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = fill.copy().set_fill(__SCREAMING_SNAKE_CASE , opacity=0.7 )
target.move_to(__SCREAMING_SNAKE_CASE )
ckpt_arr.append(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__SCREAMING_SNAKE_CASE )
self.add(*__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case__ : Tuple = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : int = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=1_8 , )
blue_text.next_to(__SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
snake_case__ : List[Any] = [meta_mem.copy() for i in range(6 )]
snake_case__ : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
snake_case__ : Optional[int] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
snake_case__ : str = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
snake_case__ : int = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
snake_case__ : str = Text("""Disk""" , font_size=2_4 )
snake_case__ : Union[str, Any] = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) , Write(__SCREAMING_SNAKE_CASE , run_time=1 ) , Create(__SCREAMING_SNAKE_CASE , run_time=1 ) )
snake_case__ : Optional[int] = []
for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : int = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*__SCREAMING_SNAKE_CASE )
self.play(FadeOut(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) )
self.play(
FadeOut(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) , )
self.wait()
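# Hedged sketch of the layout pattern the scene above repeats (assuming the
# same `from manim import *` at the top of this file): tile rectangles into a
# VGroup, label them, and group label and tiles together. Names illustrative.
class _MemoryBlockSketch(Scene):
    def construct(self):
        cells = VGroup(*[Rectangle(height=0.5, width=0.5) for _ in range(4)]).arrange(RIGHT, buff=0)
        label = Text("RAM", font_size=24)
        self.add(Group(label, cells).arrange(DOWN, buff=0.5))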
| 38 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case__ : int = botoa.client("""iam""" )
snake_case__ : Union[str, Any] = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__magic_name__ , AssumeRolePolicyDocument=json.dumps(__magic_name__ , indent=2 ) )
snake_case__ : Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__magic_name__ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(__magic_name__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def UpperCamelCase__ ( __magic_name__ : Any ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = botoa.client("""iam""" )
return iam_client.get_role(RoleName=__magic_name__ )["Role"]["Arn"]
def UpperCamelCase__ ( ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , __magic_name__ , )
snake_case__ : List[Any] = None
if credentials_configuration == 0:
snake_case__ : Dict = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
snake_case__ : List[str] = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
snake_case__ : List[str] = _ask_field("""AWS Access Key ID: """ )
snake_case__ : int = aws_access_key_id
snake_case__ : Optional[Any] = _ask_field("""AWS Secret Access Key: """ )
snake_case__ : List[str] = aws_secret_access_key
snake_case__ : Tuple = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
snake_case__ : Optional[int] = aws_region
snake_case__ : int = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , __magic_name__ , )
if role_management == 0:
snake_case__ : Optional[Any] = _ask_field("""Enter your IAM role name: """ )
else:
snake_case__ : Optional[int] = """accelerate_sagemaker_execution_role"""
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(__magic_name__ )
snake_case__ : Dict = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Any = None
if is_custom_docker_image:
snake_case__ : str = _ask_field("""Enter your Docker image: """ , lambda __magic_name__ : str(__magic_name__ ).lower() )
snake_case__ : Tuple = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : List[Any] = None
if is_sagemaker_inputs_enabled:
snake_case__ : str = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Optional[int] = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Optional[Any] = None
if is_sagemaker_metrics_enabled:
snake_case__ : List[Any] = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Tuple = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
snake_case__ : Any = {}
snake_case__ : List[Any] = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
snake_case__ : str = """dynamo_"""
snake_case__ : Tuple = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
snake_case__ : List[str] = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
snake_case__ : str = _ask_options(
"""Which mode do you want to use?""" , __magic_name__ , lambda __magic_name__ : TORCH_DYNAMO_MODES[int(__magic_name__ )] , default="""default""" , )
snake_case__ : Union[str, Any] = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : str = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Dict = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
snake_case__ : List[str] = _ask_options(
__magic_name__ , __magic_name__ , lambda __magic_name__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__magic_name__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
snake_case__ : Optional[int] = _ask_field(__magic_name__ , lambda __magic_name__ : str(__magic_name__ ).lower() , default="""ml.p3.2xlarge""" )
snake_case__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
snake_case__ : Optional[Any] = _ask_field(
"""How many machines do you want use? [1]: """ , __magic_name__ , default=1 , )
snake_case__ : Union[str, Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=__magic_name__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__magic_name__ , use_cpu=__magic_name__ , dynamo_config=__magic_name__ , eca_instance_type=__magic_name__ , profile=__magic_name__ , region=__magic_name__ , iam_role_name=__magic_name__ , mixed_precision=__magic_name__ , num_machines=__magic_name__ , sagemaker_inputs_file=__magic_name__ , sagemaker_metrics_file=__magic_name__ , )
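# Hedged sketch of the prompt-convert-retry pattern the `_ask_field` helper
# used above presumably follows; this standalone version is illustrative, not
# the accelerate implementation.
def _ask_field_sketch(question, convert=str, default=None, error_message="Please enter a valid value."):
    while True:
        raw = input(question)
        if not raw and default is not None:
            return default
        try:
            return convert(raw)
        except ValueError:
            print(error_message)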
| 38 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """depth_multiplier""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=0.25 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=1_0_2_4 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="relu6" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=1_0 , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : Dict = parent
snake_case__ : Dict = batch_size
snake_case__ : int = num_channels
snake_case__ : Optional[Any] = image_size
snake_case__ : Optional[Any] = depth_multiplier
snake_case__ : Tuple = min_depth
snake_case__ : int = tf_padding
snake_case__ : List[Any] = int(last_hidden_size * depth_multiplier )
snake_case__ : Tuple = output_stride
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Optional[int] = classifier_dropout_prob
snake_case__ : Optional[Any] = use_labels
snake_case__ : Tuple = is_training
snake_case__ : Dict = num_labels
snake_case__ : int = initializer_range
snake_case__ : Optional[Any] = scope
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
snake_case__ : List[str] = None
if self.use_labels:
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCamelCase ( self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = MobileNetVaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = self.num_labels
snake_case__ : Tuple = MobileNetVaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
snake_case__ : str = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = config_and_inputs
snake_case__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[str] = MobileNetVaModelTester(self )
snake_case__ : str = MobileNetVaConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Dict = [*signature.parameters.keys()]
snake_case__ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
snake_case__ : Dict = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = outputs.hidden_states
snake_case__ : int = 2_6
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : Optional[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Dict = MobileNetVaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> List[str]:
'''simple docstring'''
snake_case__ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self ):
snake_case__ : int = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.default_image_processor
snake_case__ : List[str] = prepare_img()
snake_case__ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : List[Any] = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
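# Illustrative arithmetic behind the shape assertion in the model tester
# above: a network with output stride s maps an H x W input to feature maps
# of spatial size (H // s) x (W // s).
def _feature_map_side(image_size: int = 32, output_stride: int = 8) -> int:
    return image_size // output_stride  # 32 // 8 == 4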
| 38 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase__ ( __magic_name__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
snake_case__ : Union[str, Any] = f"https://www.amazon.in/laptop/s?k={product}"
snake_case__ : List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
snake_case__ : int = BeautifulSoup(requests.get(__magic_name__ , headers=__magic_name__ ).text )
# Initialize a Pandas dataframe with the column titles
snake_case__ : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
snake_case__ : Optional[int] = item.ha.text
snake_case__ : Any = """https://www.amazon.in/""" + item.ha.a["""href"""]
snake_case__ : List[str] = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
snake_case__ : Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
snake_case__ : Optional[int] = """Not available"""
try:
snake_case__ : Tuple = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
snake_case__ : Optional[Any] = """"""
try:
snake_case__ : str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_00 )
except ValueError:
snake_case__ : List[Any] = float("""nan""" )
except AttributeError:
pass
snake_case__ : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case__ : List[Any] = """ """
snake_case__ : Union[str, Any] = """ """
data_frame.index += 1
return data_frame
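# Hedged helper mirroring the discount arithmetic inlined above, on plain
# strings (names illustrative): strip the currency symbol and thousands
# separators before computing (MRP - price) / MRP * 100.
def _discount_percent(product_mrp: str, product_price: str) -> float:
    mrp = float(product_mrp.strip("₹").replace(",", ""))
    price = float(product_price.strip("₹").replace(",", ""))
    return (mrp - price) / mrp * 100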
if __name__ == "__main__":
A_ : int = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 38 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = KandinskyVaaPipeline
lowerCamelCase__ = [
'''image_embeds''',
'''negative_image_embeds''',
]
lowerCamelCase__ = ['''image_embeds''', '''negative_image_embeds''']
lowerCamelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowerCamelCase__ = False
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
return self.time_input_dim
@property
def __UpperCamelCase ( self ):
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self ):
return 1_0_0
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case__ : Dict = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def __UpperCamelCase ( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.dummy_unet
snake_case__ : Union[str, Any] = self.dummy_movq
snake_case__ : List[Any] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__SCREAMING_SNAKE_CASE , )
snake_case__ : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
snake_case__ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : int = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def __UpperCamelCase ( self ):
snake_case__ : str = """cpu"""
snake_case__ : Dict = self.get_dummy_components()
snake_case__ : int = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
snake_case__ : int = output.images
snake_case__ : Dict = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1]
snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Tuple = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
snake_case__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
snake_case__ : int = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
snake_case__ : str = KandinskyVaaPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
snake_case__ : List[str] = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : int = """red cat, 4k photo"""
snake_case__ : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case__ , snake_case__ : Optional[Any] = pipe_prior(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case__ : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case__ : Optional[int] = pipeline(
image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , output_type="""np""" , )
snake_case__ : Optional[int] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
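# Small hedged sketch of the seeded-generator pattern in `get_dummy_inputs`
# above: a torch.Generator pinned to a device makes noise sampling
# reproducible across runs.
def _seeded_noise_sketch():
    import torch
    gen = torch.Generator(device="cpu").manual_seed(0)
    first = torch.randn(2, 2, generator=gen)
    gen.manual_seed(0)
    second = torch.randn(2, 2, generator=gen)
    assert torch.equal(first, second)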
| 38 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LongformerTokenizer
lowerCamelCase__ = True
lowerCamelCase__ = LongformerTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Optional[int] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = """lower newer"""
snake_case__ : Dict = """lower newer"""
return input_text, output_text
def __UpperCamelCase ( self ):
snake_case__ : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = """lower newer"""
snake_case__ : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokens + [tokenizer.unk_token]
snake_case__ : List[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
snake_case__ : int = tokenizer.encode("""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = """Encode this sequence."""
snake_case__ : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
snake_case__ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case__ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : str = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : str = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while the Python tokenizer doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __UpperCamelCase ( self ):
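# Serialize the fast tokenizer's pre-tokenizer and post-processor and verify that add_prefix_space/trim_offsets round-trip.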
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""trim_offsets"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : Any = f"{text_of_1_token} {text_of_1_token}"
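# Exercise every (add_prefix_space, trim_offsets) combination on two space-separated tokens and check the offset mappings.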
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
| 38 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Tuple, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
A_ : Tuple = logging.get_logger(__name__)
A_ : Any = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''perceiver'''
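# Perceiver cross-attends a fixed-size latent array (num_latents x d_latents) to inputs of width d_model, decoupling compute from input length.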
def __init__( self , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=1_2_8_0 , __SCREAMING_SNAKE_CASE=7_6_8 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2_6 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="kv" , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2_6_2 , __SCREAMING_SNAKE_CASE=2_0_4_8 , __SCREAMING_SNAKE_CASE=5_6 , __SCREAMING_SNAKE_CASE=[3_6_8, 4_9_6] , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=1_9_2_0 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=[1, 1_6, 2_2_4, 2_2_4] , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
snake_case__ : Any = num_latents
snake_case__ : Any = d_latents
snake_case__ : List[Any] = d_model
snake_case__ : List[Any] = num_blocks
snake_case__ : Optional[int] = num_self_attends_per_block
snake_case__ : Tuple = num_self_attention_heads
snake_case__ : Optional[int] = num_cross_attention_heads
snake_case__ : Union[str, Any] = qk_channels
snake_case__ : Union[str, Any] = v_channels
snake_case__ : Any = cross_attention_shape_for_attention
snake_case__ : List[Any] = self_attention_widening_factor
snake_case__ : Optional[int] = cross_attention_widening_factor
snake_case__ : Dict = hidden_act
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : Union[str, Any] = layer_norm_eps
snake_case__ : Optional[Any] = use_query_residual
# masked language modeling attributes
snake_case__ : Optional[Any] = vocab_size
snake_case__ : List[Any] = max_position_embeddings
# image classification attributes
snake_case__ : List[str] = image_size
# flow attributes
snake_case__ : Union[str, Any] = train_size
# multimodal autoencoding attributes
snake_case__ : Tuple = num_frames
snake_case__ : List[str] = audio_samples_per_frame
snake_case__ : Optional[int] = samples_per_patch
snake_case__ : Dict = output_shape
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCamelCase ( self ):
if self.task == "multiple-choice":
snake_case__ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case__ : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 3 , __SCREAMING_SNAKE_CASE = 4_0 , __SCREAMING_SNAKE_CASE = 4_0 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case__ : Any = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case__ : str = preprocessor.num_special_tokens_to_add(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
snake_case__ : Any = [""" """.join(["""a"""] ) * seq_length] * batch_size
snake_case__ : Tuple = dict(preprocessor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
snake_case__ : Dict = inputs.pop("""input_ids""" )
return inputs
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case__ : Optional[int] = compute_effective_axis_dimension(__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch )
snake_case__ : Any = self._generate_dummy_images(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = dict(preprocessor(images=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
snake_case__ : List[str] = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 38 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''resnet'''
lowerCamelCase__ = ['''basic''', '''bottleneck''']
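# "basic" is the two-conv residual block of ResNet-18/34; "bottleneck" is the 1x1-3x3-1x1 block used from ResNet-50 upward.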
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="bottleneck" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
snake_case__ : List[Any] = num_channels
snake_case__ : str = embedding_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : Dict = depths
snake_case__ : List[Any] = layer_type
snake_case__ : int = hidden_act
snake_case__ : Union[str, Any] = downsample_in_first_stage
snake_case__ : Dict = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-3
| 38 | 1 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from typing import Any, Dict, List, Optional, Tuple, Union
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def UpperCamelCase__ ( __magic_name__ : int ) -> List[Any]: # picklable for multiprocessing
'''simple docstring'''
return __magic_name__.sum()
def UpperCamelCase__ ( __magic_name__ : Any ) -> Optional[int]: # picklable for multiprocessing
'''simple docstring'''
return __magic_name__ + 1
@dataclass
class A :
'''simple docstring'''
x : int
y : str
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
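# map_nested should apply the function to every leaf of nested lists/dicts while preserving the container structure.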
snake_case__ : str = {}
snake_case__ : Tuple = []
snake_case__ : List[Any] = 1
snake_case__ : Union[str, Any] = [1, 2]
snake_case__ : str = {"""a""": 1, """b""": 2}
snake_case__ : Dict = {"""a""": [1, 2], """b""": [3, 4]}
snake_case__ : Dict = {"""a""": {"""1""": 1}, """b""": 2}
snake_case__ : Optional[int] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
snake_case__ : int = {}
snake_case__ : Tuple = []
snake_case__ : Optional[Any] = 2
snake_case__ : Tuple = [2, 3]
snake_case__ : str = {"""a""": 2, """b""": 3}
snake_case__ : List[Any] = {"""a""": [2, 3], """b""": [4, 5]}
snake_case__ : Union[str, Any] = {"""a""": {"""1""": 2}, """b""": 3}
snake_case__ : Optional[int] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = 2
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
snake_case__ : int = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
snake_case__ : int = {"""a""": 2, """b""": 0, """c""": 2}
snake_case__ : Union[str, Any] = {
"""a""": np.eye(2 ).astype(__SCREAMING_SNAKE_CASE ),
"""b""": np.zeros(3 ).astype(__SCREAMING_SNAKE_CASE ),
"""c""": np.ones(2 ).astype(__SCREAMING_SNAKE_CASE ),
}
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , map_numpy=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , map_numpy=__SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , map_numpy=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , map_numpy=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(__SCREAMING_SNAKE_CASE ): # can't pickle a local lambda
map_nested(lambda __SCREAMING_SNAKE_CASE : __SCREAMING_SNAKE_CASE + 1 , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = {"""a""": 1, """b""": 2}
snake_case__ : Optional[int] = {"""a""": 3, """b""": 4}
snake_case__ : List[Any] = {"""a""": 5, """b""": 6}
snake_case__ : Optional[Any] = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
class Foo :
'''simple docstring'''
my_attr = '''bar'''
foo : str = Foo()
self.assertEqual(foo.my_attr , """bar""" )
with temporary_assignment(foo , """my_attr""" , """BAR""" ):
self.assertEqual(foo.my_attr , """BAR""" )
self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def UpperCamelCase__ ( iterable_length : List[Any] , num_proc : List[Any] , expected_num_proc : Any ) -> Optional[int]:
'''simple docstring'''
with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
"""datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
snake_case__ : Optional[int] = {f"{i}": i for i in range(iterable_length )}
snake_case__ : List[str] = map_nested(lambda __magic_name__ : __magic_name__ + 10 , snake_case__ , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_tf
def __UpperCamelCase ( self ):
import tensorflow as tf
from tensorflow.keras import layers
snake_case__ : Any = layers.Dense(2 )
def gen_random_output():
snake_case__ : Union[str, Any] = tf.random.uniform((1, 3) )
return model(__SCREAMING_SNAKE_CASE ).numpy()
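# Two draws under the same temp_seed must match; a later draw outside the context must differ.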
with temp_seed(4_2 , set_tensorflow=__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = gen_random_output()
with temp_seed(4_2 , set_tensorflow=__SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = gen_random_output()
snake_case__ : Optional[Any] = gen_random_output()
np.testing.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __UpperCamelCase ( self ):
import torch
def gen_random_output():
snake_case__ : List[Any] = torch.nn.Linear(3 , 2 )
snake_case__ : Dict = torch.rand(1 , 3 )
return model(__SCREAMING_SNAKE_CASE ).detach().numpy()
with temp_seed(4_2 , set_pytorch=__SCREAMING_SNAKE_CASE ):
snake_case__ : int = gen_random_output()
with temp_seed(4_2 , set_pytorch=__SCREAMING_SNAKE_CASE ):
snake_case__ : str = gen_random_output()
snake_case__ : Any = gen_random_output()
np.testing.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __UpperCamelCase ( self ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
snake_case__ : Any = gen_random_output()
with temp_seed(4_2 ):
snake_case__ : Optional[Any] = gen_random_output()
snake_case__ : int = gen_random_output()
np.testing.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def UpperCamelCase__ ( input_data : List[Any] ) -> Dict:
'''simple docstring'''
output_data : Union[str, Any] = NestedDataStructure(input_data ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def UpperCamelCase__ ( data : Tuple , expected_output : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
output : Optional[int] = NestedDataStructure(data ).flatten()
assert output == expected_output
def UpperCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
obj : int = A(x=1 , y="""foobar""" )
expected_output : Optional[int] = {"""x""": 1, """y""": """foobar"""}
assert asdict(obj ) == expected_output
obj : Tuple = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
expected_output : str = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
assert asdict(obj ) == expected_output
with pytest.raises(TypeError ):
asdict([1, A(x=10 , y="""foo""" )] )
def _split_text(text : str ) -> List[str]:
'''simple docstring'''
return text.split()
def _aseconds_generator_of_aitems_with_timing(content : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def UpperCamelCase__ ( ) -> Optional[int]:
'''simple docstring'''
with Pool(2 ) as pool:
out : Optional[Any] = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(out ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
out : Dict = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
assert out.count("""hello""" ) == 10
assert out.count("""there""" ) == 10
assert len(out ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
out : Any = []
for yield_time, content in iflatmap_unordered(
pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(content )
assert out.count("""a""" ) == 2
assert out.count("""b""" ) == 2
assert len(out ) == 4
| 38 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
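# Warn at import time so callers migrate to diffusers.pipelines.pipeline_utils before this shim is removed in 0.22.0.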
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 38 | 1 |
'''simple docstring'''
from typing import Any
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''glpn'''
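# GLPN pairs a SegFormer-style hierarchical encoder with a lightweight decoder for monocular depth estimation.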
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=1_0 , __SCREAMING_SNAKE_CASE=-1 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
snake_case__ : int = num_channels
snake_case__ : Any = num_encoder_blocks
snake_case__ : str = depths
snake_case__ : str = sr_ratios
snake_case__ : Optional[int] = hidden_sizes
snake_case__ : Any = patch_sizes
snake_case__ : Any = strides
snake_case__ : Union[str, Any] = mlp_ratios
snake_case__ : int = num_attention_heads
snake_case__ : Optional[Any] = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : Any = attention_probs_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[Any] = drop_path_rate
snake_case__ : str = layer_norm_eps
snake_case__ : Optional[int] = decoder_hidden_size
snake_case__ : Union[str, Any] = max_depth
snake_case__ : List[Any] = head_in_index
| 38 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ):
snake_case__ : str = []
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_init_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_evaluate""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_predict""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_save""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_log""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_prediction_step""" )
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Tuple = tempfile.mkdtemp()
def __UpperCamelCase ( self ):
shutil.rmtree(self.output_dir )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# it is set to False since the tests later on depend on its value.
snake_case__ : List[Any] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionModelConfig(a=__SCREAMING_SNAKE_CASE , b=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionPreTrainedModel(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = TrainingArguments(self.output_dir , disable_tqdm=__SCREAMING_SNAKE_CASE , report_to=[] , **__SCREAMING_SNAKE_CASE )
return Trainer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , train_dataset=__SCREAMING_SNAKE_CASE , eval_dataset=__SCREAMING_SNAKE_CASE , callbacks=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
# Order doesn't matter
snake_case__ : Tuple = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
snake_case__ : List[str] = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
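# Entries may be callback classes or instances; compare class against class in whichever direction applies.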
for cba, cba in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , cba.__class__ )
elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(cba.__class__ , __SCREAMING_SNAKE_CASE )
else:
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
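# Rebuild the exact event sequence a full training run should fire, mirroring the Trainer's logging/eval/save scheduling.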
snake_case__ : Tuple = ["""on_init_end""", """on_train_begin"""]
snake_case__ : Union[str, Any] = 0
snake_case__ : Dict = len(trainer.get_eval_dataloader() )
snake_case__ : Any = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(__SCREAMING_SNAKE_CASE ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_trainer()
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# Callbacks passed at init are added to the default callbacks
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
snake_case__ : Optional[Any] = self.get_trainer(disable_tqdm=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ : int = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.get_trainer()
snake_case__ : List[str] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(cb.__class__ , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# We can also add, pop, or remove by instance
snake_case__ : List[Any] = self.get_trainer()
snake_case__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.get_trainer()
snake_case__ : Any = trainer.callback_handler.callbacks[0]
snake_case__ : Optional[Any] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since they are not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
snake_case__ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# A bit of everything
snake_case__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
snake_case__ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case__ : List[str] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
| 38 | 1 |
'''simple docstring'''
def check_bouncy(__magic_name__ : int ) -> bool:
'''simple docstring'''
if not isinstance(__magic_name__ , int ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
str_n : str = str(__magic_name__ )
sorted_str_n : str = """""".join(sorted(str_n ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
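# A number is bouncy when its digits are neither monotonically increasing nor decreasing: 538 is bouncy, 134468 and 66420 are not (Project Euler 112).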
def solution(percent : float = 99 ) -> int:
'''simple docstring'''
if not 0 < percent < 1_00:
raise ValueError("""solution() only accepts values from 0 to 100""" )
bouncy_num : int = 0
num : int = 1
while True:
if check_bouncy(num ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
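# Reference points from the Project Euler 112 statement: solution(50) == 538 and solution(90) == 21780.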
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(99)}')
| 38 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
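# A bare int size should be expanded to a square {"height": 42, "width": 42}.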
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 38 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, Optional
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
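# _LazyModule defers the heavy torch/vision imports until an attribute of this package is first accessed.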
A_ : int = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 38 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : List[Any] = patch_sizes
snake_case__ : Optional[int] = patch_stride
snake_case__ : Optional[Any] = patch_padding
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : Dict = num_labels
snake_case__ : Optional[Any] = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : Optional[int] = num_heads
snake_case__ : Optional[int] = stride_kv
snake_case__ : int = depth
snake_case__ : Optional[Any] = cls_token
snake_case__ : List[Any] = attention_drop_rate
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFCvtModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = (self.image_size, self.image_size)
snake_case__ , snake_case__ : str = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
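# Worked example for the first stage with the defaults above (image_size=64,
# patch_sizes[0]=7, patch_stride[0]=4, patch_padding[0]=2):
#   floor((64 + 2 * 2 - 7) / 4 + 1) = floor(16.25) = 16 == 64 // 4,
# which is exactly the `image_size // 4` spatial size asserted against the
# first hidden states in the test further below.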
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : str = TFCvtForImageClassification(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtModelTester(self )
snake_case__ : Any = TFCvtConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__SCREAMING_SNAKE_CASE )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] = outputs.hidden_states
snake_case__ : Tuple = len(self.model_tester.depth )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFCvtModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : int = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 38 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipeline_utils import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
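# Migration sketch (assumed caller code, kept as comments to avoid a circular
# import inside this shim module):
#   from diffusers.pipeline_utils import DiffusionPipeline             # deprecated, emits a warning
#   from diffusers.pipelines.pipeline_utils import DiffusionPipeline   # preferred going forward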
| 38 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# For consistency across the different places where DisjunctiveConstraint is called,
# dc.token_ids is a list of integers, and it can only be initialized from integers.
snake_case__ : int = [[1, 2, 4], [1, 2, 3, 4]]
snake_case__ : Any = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self ):
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
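# Illustration: with nested candidates [[1, 2], [1, 2, 3, 4]], generating
# [1, 2] would already "complete" the constraint, so the longer branch
# [1, 2, 3, 4] could never be pursued; hence the constructor must reject it.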
snake_case__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [[1, 2, 3], [1, 2, 4]]
snake_case__ : Optional[int] = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : Any = dc.update(1 )
snake_case__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : Tuple = dc.update(2 )
snake_case__ : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(3 )
snake_case__ : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
snake_case__ : int = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 38 | 1 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = str(id_ )
snake_case__ : Dict = None
snake_case__ : List[Any] = None
snake_case__ : Optional[int] = []
snake_case__ : Tuple = {} # {vertex:distance}
def __lt__( self , __SCREAMING_SNAKE_CASE ):
return self.key < other.key
def __repr__( self ):
return self.id
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
self.neighbors.append(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = weight
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __magic_name__ )
graph[b - 1].add_edge(graph[a - 1] , __magic_name__ )
def UpperCamelCase__ ( __magic_name__ : list , __magic_name__ : Vertex ) -> list:
'''simple docstring'''
snake_case__ : Optional[int] = []
for u in graph:
snake_case__ : str = math.inf
snake_case__ : List[Any] = None
snake_case__ : Dict = 0
snake_case__ : Tuple = graph[:]
while q:
snake_case__ : Any = min(__magic_name__ )
q.remove(__magic_name__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
snake_case__ : Optional[int] = u
snake_case__ : Dict = u.edges[v.id]
for i in range(1 , len(__magic_name__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def UpperCamelCase__ ( __magic_name__ : list , __magic_name__ : Vertex ) -> Iterator[tuple]:
'''simple docstring'''
for u in graph:
snake_case__ : Tuple = math.inf
snake_case__ : Tuple = None
snake_case__ : Optional[int] = 0
snake_case__ : str = list(__magic_name__ )
hq.heapify(__magic_name__ )
while h:
snake_case__ : str = hq.heappop(__magic_name__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
snake_case__ : Union[str, Any] = u
snake_case__ : Dict = u.edges[v.id]
hq.heapify(__magic_name__ )
for i in range(1 , len(__magic_name__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
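# Hedged usage sketch. The names `Vertex`, `connect`, `prim`, and `prim_heap`
# are assumed de-obfuscated names; in this file the class is `__snake_case`
# and the functions are all `UpperCamelCase__`, so this is illustrative only.
#   graph = [Vertex(i) for i in range(1, 5)]
#   connect(graph, 1, 2, 15)  # undirected weighted edge between vertices 1 and 2
#   connect(graph, 2, 3, 4)
#   connect(graph, 3, 4, 9)
#   mst = prim(graph[:], graph[0])                   # list-based O(V^2) variant
#   mst_lazy = list(prim_heap(graph[:], graph[0]))   # binary-heap variant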
| 38 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
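# Minimal usage sketch (assumption: upstream this class is `SegformerConfig`,
# while the name here is obfuscated):
#   config = SegformerConfig(num_labels=150)  # e.g. ADE20K semantic classes
#   assert config.num_encoder_blocks == 4 and config.decoder_hidden_size == 256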
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
| 38 | 1 |
'''simple docstring'''
from math import factorial
def UpperCamelCase__ ( __magic_name__ : int = 1_00 ) -> int:
'''simple docstring'''
return sum(int(__magic_name__ ) for x in str(factorial(__magic_name__ ) ) )
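# Worked example: factorial(10) == 3628800, so solution(10) == 3+6+2+8+8+0+0 == 27.
# With the default n == 100 this computes Project Euler problem 20.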
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 38 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip unrelated lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test, job link)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
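# e.g. "tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_x"
# yields "albert", while any test path outside tests/models/ yields None.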
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# Deal with the `workflow_call` event, where a job name is the combination of the caller's and the callee's job names.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
A_ : Any = make_github_table(reduced_by_error)
A_ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 38 | 1 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 38 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[Any] = 0
snake_case__ : Union[str, Any] = dataset_name
snake_case__ : Optional[int] = cache_dir
snake_case__ : Union[str, Any] = use_local_dummy_data
snake_case__ : int = config
# download_callbacks take a single url as input
snake_case__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
snake_case__ : List[str] = None
snake_case__ : List[str] = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
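# Example of the mapping this produces (illustrative url):
#   {"train": "https://host/data/train.csv?dl=1"}
#   -> {"train": os.path.join(path_to_dummy_data, "train.csv%3Fdl%3D1")}
# i.e. each value becomes the url's last path component, percent-encoded via
# urllib.parse.quote_plus and resolved under the dummy data root.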
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility; maybe deprecate at some point.
# For many datasets with a single url call to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file,
# while nowadays we expect the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 38 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=9_9 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : Dict = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Any = seq_length
snake_case__ : int = is_training
snake_case__ : List[Any] = use_input_mask
snake_case__ : Dict = use_token_type_ids
snake_case__ : Optional[Any] = use_labels
snake_case__ : Optional[int] = vocab_size
snake_case__ : List[str] = hidden_size
snake_case__ : str = embedding_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : List[Any] = intermediate_size
snake_case__ : List[str] = hidden_act
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : Union[str, Any] = type_vocab_size
snake_case__ : List[str] = type_sequence_label_size
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : Tuple = num_labels
snake_case__ : List[Any] = num_choices
snake_case__ : Union[str, Any] = scope
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Tuple = None
if self.use_input_mask:
snake_case__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : int = None
if self.use_token_type_ids:
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : List[Any] = None
snake_case__ : str = None
snake_case__ : Optional[int] = None
if self.use_labels:
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = MegatronBertModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[int] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = MegatronBertForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = MegatronBertForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[int] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = MegatronBertForNextSentencePrediction(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Any = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = MegatronBertForPreTraining(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[int] = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , next_sentence_label=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = MegatronBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Tuple = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = self.num_labels
snake_case__ : Optional[int] = MegatronBertForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : int = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = MegatronBertForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Tuple = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_choices
snake_case__ : Dict = MegatronBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : Tuple = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = config_and_inputs
snake_case__ : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
# test_resize_embeddings = False
lowerCamelCase__ = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
snake_case__ : List[Any] = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
snake_case__ : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
return inputs_dict
def __UpperCamelCase ( self ):
snake_case__ : Tuple = MegatronBertModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return torch.tensor(
__magic_name__ , dtype=torch.long , device=__magic_name__ , )
A_ : str = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip("""Model is not available.""" )
def __UpperCamelCase ( self ):
snake_case__ : int = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
snake_case__ : Union[str, Any] = os.path.join(os.environ["""MYDIR"""] , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = MegatronBertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.half()
snake_case__ : Any = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )[0]
snake_case__ : List[str] = torch.Size((1, 9, 1_0_2_4) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : str = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
snake_case__ : List[Any] = output[0, ii, jj]
snake_case__ : Optional[int] = expected[3 * ii + jj]
snake_case__ : Dict = """ii={} jj={} a={} b={}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertTrue(math.isclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rel_tol=__SCREAMING_SNAKE_CASE , abs_tol=__SCREAMING_SNAKE_CASE ) , msg=__SCREAMING_SNAKE_CASE )
| 38 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 38 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = KandinskyVaaInpaintPipeline
lowerCamelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
lowerCamelCase__ = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
lowerCamelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowerCamelCase__ = False
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
return self.time_input_dim
@property
def __UpperCamelCase ( self ):
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self ):
return 1_0_0
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : Any = {
"""in_channels""": 9,
            # out_channels is double in_channels because the model predicts both the mean and the variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case__ : int = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def __UpperCamelCase ( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.dummy_unet
snake_case__ : Optional[int] = self.dummy_movq
snake_case__ : Optional[int] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__SCREAMING_SNAKE_CASE , )
snake_case__ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
snake_case__ : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__SCREAMING_SNAKE_CASE )
# create init_image
snake_case__ : Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : int = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
# create mask
snake_case__ : Optional[Any] = np.ones((6_4, 6_4) , dtype=np.floataa )
snake_case__ : Optional[int] = 0
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def __UpperCamelCase ( self ):
snake_case__ : Any = """cpu"""
snake_case__ : Optional[Any] = self.get_dummy_components()
snake_case__ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
snake_case__ : str = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : str = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = output.images
snake_case__ : Union[str, Any] = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
snake_case__ : int = image[0, -3:, -3:, -1]
snake_case__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : str = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def __UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
snake_case__ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case__ : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
snake_case__ : str = 0
snake_case__ : List[str] = """a hat"""
snake_case__ : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
snake_case__ : Union[str, Any] = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case__ , snake_case__ : int = pipe_prior(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case__ : List[str] = pipeline(
image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="""np""" , )
snake_case__ : List[Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
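        # Flow note (sketch): the prior pipeline turns the text prompt into CLIP
        # image embeddings, which the inpaint decoder consumes together with the
        # source image and mask -- the two-stage structure this slow test exercises.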
| 38 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Dict = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None , __magic_name__ : List[str]=None ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = True
while ask_again:
snake_case__ : Optional[Any] = input(__magic_name__ )
try:
if default is not None and len(__magic_name__ ) == 0:
return default
return convert_value(__magic_name__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Any=[] , __magic_name__ : Optional[int]=None , __magic_name__ : int=0 ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = BulletMenu(__magic_name__ , __magic_name__ )
snake_case__ : Optional[Any] = menu.run(default_choice=__magic_name__ )
return convert_value(__magic_name__ ) if convert_value is not None else result
def UpperCamelCase__ ( __magic_name__ : Any ) -> int:
'''simple docstring'''
snake_case__ : Tuple = int(__magic_name__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase__ ( __magic_name__ : str ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = int(__magic_name__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = int(__magic_name__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Tuple:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __snake_case ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = super()._format_usage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = usage.replace("""<command> [<args>] """ , """""" )
return usage
| 38 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( __magic_name__ : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(__magic_name__ ) / len(__magic_name__ )
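# Example (illustrative): the arithmetic mean of [3, 6, 9] is (3 + 6 + 9) / 3 = 6.0,
# while an empty list raises ValueError instead of dividing by zero.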
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 | 1 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=0 ):
snake_case__ : int = 1.0 if scale is None else scale
snake_case__ : Dict = 0.0 if loc is None else loc
super().__init__(__SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__SCREAMING_SNAKE_CASE )] )
@property
def __UpperCamelCase ( self ):
return self.base_dist.mean * self.scale + self.loc
@property
def __UpperCamelCase ( self ):
return self.base_dist.variance * self.scale**2
@property
def __UpperCamelCase ( self ):
return self.variance.sqrt()
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
super().__init__(**__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = args_dim
snake_case__ : List[Any] = nn.ModuleList([nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] )
snake_case__ : str = domain_map
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = [proj(__SCREAMING_SNAKE_CASE ) for proj in self.proj]
return self.domain_map(*__SCREAMING_SNAKE_CASE )
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
super().__init__()
snake_case__ : Any = function
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.function(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
def __init__( self , __SCREAMING_SNAKE_CASE = 1 ):
snake_case__ : List[Any] = dim
snake_case__ : List[str] = {k: dim * self.args_dim[k] for k in self.args_dim}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if self.dim == 1:
return self.distribution_class(*__SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(*__SCREAMING_SNAKE_CASE ) , 1 )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[str] = self._base_distribution(__SCREAMING_SNAKE_CASE )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__SCREAMING_SNAKE_CASE , loc=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , event_dim=self.event_dim )
@property
def __UpperCamelCase ( self ):
return () if self.dim == 1 else (self.dim,)
@property
def __UpperCamelCase ( self ):
return len(self.event_shape )
@property
def __UpperCamelCase ( self ):
return 0.0
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return ParameterProjection(
in_features=__SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def __UpperCamelCase ( self , *__SCREAMING_SNAKE_CASE ):
raise NotImplementedError()
@staticmethod
def __UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
return (x + torch.sqrt(torch.square(__SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0
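        # Math note: squareplus(x) = (x + sqrt(x**2 + 4)) / 2 is a smooth positive
        # map akin to softplus: squareplus(0) = 1, it approaches x for large x and
        # 0 as x -> -inf, and it needs only a sqrt rather than exp/log.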
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = {"df": 1, "loc": 1, "scale": 1}
lowerCamelCase__ = StudentT
@classmethod
def __UpperCamelCase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = cls.squareplus(__SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
snake_case__ : Dict = 2.0 + cls.squareplus(__SCREAMING_SNAKE_CASE )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = {"loc": 1, "scale": 1}
lowerCamelCase__ = Normal
@classmethod
def __UpperCamelCase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = cls.squareplus(__SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = {"total_count": 1, "logits": 1}
lowerCamelCase__ = NegativeBinomial
@classmethod
def __UpperCamelCase ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = cls.squareplus(__SCREAMING_SNAKE_CASE )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ , snake_case__ : List[str] = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(total_count=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE ) , 1 )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None ):
snake_case__ , snake_case__ : Union[str, Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
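# Scaling note (sketch): for torch's NegativeBinomial(total_count=r, logits=l)
# the mean is r * exp(l), so scaling the mean by s > 0 amounts to adding log(s)
# to the logits -- which is why the method above folds `scale` into the logits
# instead of wrapping the integer-valued count distribution in an affine map.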
| 38 |
'''simple docstring'''
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
super().__init__()
snake_case__ : list[str] = []
snake_case__ : List[Any] = domain
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined and is neither empty nor just "#", process it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
snake_case__ : str = parse.urljoin(self.domain , __SCREAMING_SNAKE_CASE )
self.urls.append(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return ".".join(get_sub_domain_name(__magic_name__ ).split(""".""" )[-2:] )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return parse.urlparse(__magic_name__ ).netloc
def UpperCamelCase__ ( __magic_name__ : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
snake_case__ : List[str] = get_domain_name(__magic_name__ )
# Initialize the parser
snake_case__ : Optional[Any] = Parser(__magic_name__ )
try:
# Open URL
snake_case__ : Any = requests.get(__magic_name__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ : List[str] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case__ : Tuple = requests.get(__magic_name__ )
# Get the valid email.
snake_case__ : List[str] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__magic_name__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__magic_name__ )
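# Caveat (sketch): the regex above accepts only purely alphanumeric local parts,
# so an address like "first.last@<domain>" is captured as "last@<domain>"; widen
# the character class if dots, underscores or "+" tags matter.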
if __name__ == "__main__":
A_ : str = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
| 38 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> Tuple:
'''simple docstring'''
if not head:
return True
# split the list to two parts
snake_case__ , snake_case__ : Dict = head.next, head
while fast and fast.next:
snake_case__ : Any = fast.next.next
snake_case__ : int = slow.next
snake_case__ : Dict = slow.next
    snake_case__ : List[str] = None  # detach the first half; the comparison also works without this, but detaching keeps the halves separate
# reverse the second part
snake_case__ : Tuple = None
while second:
snake_case__ : Tuple = second.next
snake_case__ : Any = node
snake_case__ : str = second
snake_case__ : Optional[Any] = nxt
# compare two parts
    # the second part has the same number of nodes, or one fewer
while node:
if node.val != head.val:
return False
snake_case__ : List[Any] = node.next
snake_case__ : int = head.next
return True
def UpperCamelCase__ ( __magic_name__ : Any ) -> Optional[Any]:
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
snake_case__ : List[Any] = head
while fast and fast.next:
snake_case__ , snake_case__ : Any = fast.next.next, slow.next
# 2. Push the second half into the stack
snake_case__ : Tuple = [slow.val]
while slow.next:
snake_case__ : Optional[Any] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
snake_case__ : str = cur.next
return True
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
if not head or not head.next:
return True
snake_case__ : int = {}
snake_case__ : Union[str, Any] = 0
while head:
if head.val in d:
d[head.val].append(__magic_name__ )
else:
snake_case__ : Tuple = [pos]
snake_case__ : Optional[Any] = head.next
pos += 1
snake_case__ : int = pos - 1
snake_case__ : str = 0
for v in d.values():
if len(__magic_name__ ) % 2 != 0:
middle += 1
else:
snake_case__ : List[str] = 0
for i in range(0 , len(__magic_name__ ) ):
if v[i] + v[len(__magic_name__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
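# Minimal usage sketch (assumption: the functions above expect singly linked
# nodes exposing `.val` and `.next`, which this file never defines itself):
#
#     class Node:
#         def __init__(self, val, nxt=None):
#             self.val, self.next = val, nxt
#
#     head = Node(1, Node(2, Node(2, Node(1))))  # 1 -> 2 -> 2 -> 1, a palindrome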
| 38 | 1 |
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
snake_case__ : Optional[Any] = nn.Linear(3 , 4 )
snake_case__ : Union[str, Any] = nn.BatchNormad(4 )
snake_case__ : Optional[Any] = nn.Linear(4 , 5 )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return self.lineara(self.batchnorm(self.lineara(__SCREAMING_SNAKE_CASE ) ) )
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Tuple = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__SCREAMING_SNAKE_CASE , model.state_dict() )
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , """index.json""" )
self.assertTrue(os.path.isfile(__SCREAMING_SNAKE_CASE ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
snake_case__ : Union[str, Any] = os.path.join(__SCREAMING_SNAKE_CASE , f"{key}.dat" )
self.assertTrue(os.path.isfile(__SCREAMING_SNAKE_CASE ) )
# TODO: add tests on the fact weights are properly loaded
def __UpperCamelCase ( self ):
snake_case__ : Dict = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
snake_case__ : Any = torch.randn(2 , 3 , dtype=__SCREAMING_SNAKE_CASE )
with TemporaryDirectory() as tmp_dir:
snake_case__ : Dict = offload_weight(__SCREAMING_SNAKE_CASE , """weight""" , __SCREAMING_SNAKE_CASE , {} )
snake_case__ : Union[str, Any] = os.path.join(__SCREAMING_SNAKE_CASE , """weight.dat""" )
self.assertTrue(os.path.isfile(__SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""weight""": {"""shape""": [2, 3], """dtype""": str(__SCREAMING_SNAKE_CASE ).split(""".""" )[1]}} )
snake_case__ : int = load_offloaded_weight(__SCREAMING_SNAKE_CASE , index["""weight"""] )
self.assertTrue(torch.equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
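            # Round-trip sketch: offload_weight memory-maps each tensor to
            # "<tmp_dir>/<name>.dat" and records its shape/dtype in the index, which
            # load_offloaded_weight then uses to rebuild an identical tensor.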
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = ModelForTest()
snake_case__ : str = model.state_dict()
snake_case__ : Dict = {k: v for k, v in state_dict.items() if """linear2""" not in k}
snake_case__ : List[str] = {k: v for k, v in state_dict.items() if """linear2""" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = OffloadedWeightsLoader(state_dict=__SCREAMING_SNAKE_CASE , save_folder=__SCREAMING_SNAKE_CASE )
# Every key is there with the right value
self.assertEqual(sorted(__SCREAMING_SNAKE_CASE ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , weight_map[key] ) )
snake_case__ : Optional[Any] = {k: v for k, v in state_dict.items() if """weight""" in k}
snake_case__ : Optional[int] = {k: v for k, v in state_dict.items() if """weight""" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = OffloadedWeightsLoader(state_dict=__SCREAMING_SNAKE_CASE , save_folder=__SCREAMING_SNAKE_CASE )
# Every key is there with the right value
self.assertEqual(sorted(__SCREAMING_SNAKE_CASE ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Duplicates are removed
snake_case__ : List[str] = OffloadedWeightsLoader(state_dict=__SCREAMING_SNAKE_CASE , save_folder=__SCREAMING_SNAKE_CASE )
# Every key is there with the right value
self.assertEqual(sorted(__SCREAMING_SNAKE_CASE ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , weight_map[key] ) )
def __UpperCamelCase ( self ):
snake_case__ : str = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
snake_case__ : str = extract_submodules_state_dict(__SCREAMING_SNAKE_CASE , ["""a.1""", """a.2"""] )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""a.1""": 0, """a.2""": 2} )
snake_case__ : Dict = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
snake_case__ : Dict = extract_submodules_state_dict(__SCREAMING_SNAKE_CASE , ["""a.1""", """a.2"""] )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""a.1.a""": 0, """a.2.a""": 2} )
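        # Note (assumption about the accelerate implementation these assertions
        # rely on): "a.10" is excluded when "a.1" is requested because submodule
        # names are matched as dotted-path components, not raw string prefixes.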
| 38 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ : str = 250004
A_ : str = 250020
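# The two ids above (250004 / 250020) are referenced below as EN_CODE and
# RO_CODE -- MBart's language-code tokens for en_XX and ro_RO.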
@require_sentencepiece
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = MBartTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case__ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case__ : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : int = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
            # Checks it saves the same files plus the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : Tuple = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case__ : Any = tempfile.mkdtemp()
snake_case__ : Optional[int] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
            # Checks it saves the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : List[Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case__ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = '''facebook/mbart-large-en-ro'''
lowerCamelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase__ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
snake_case__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
snake_case__ : Any = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
snake_case__ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case__ : List[Any] = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : int = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
snake_case__ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = MBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
| 38 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : int ) -> int:
'''simple docstring'''
if n == 1 or not isinstance(__magic_name__ , __magic_name__ ):
return 0
elif n == 2:
return 1
else:
snake_case__ : int = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCamelCase__ ( __magic_name__ : int ) -> int:
'''simple docstring'''
snake_case__ : List[Any] = 0
snake_case__ : str = 2
while digits < n:
index += 1
snake_case__ : List[Any] = len(str(fibonacci(__magic_name__ ) ) )
return index
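# Performance note (sketch): fibonacci() rebuilds its whole sequence on every
# call, making the search loop above roughly quadratic; tracking two running
# values and checking len(str(b)) each step would find the same index linearly.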
def UpperCamelCase__ ( __magic_name__ : int = 10_00 ) -> int:
'''simple docstring'''
return fibonacci_digits_index(__magic_name__ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 38 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''bit'''
lowerCamelCase__ = ['''preactivation''', '''bottleneck''']
lowerCamelCase__ = ['''SAME''', '''VALID''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="preactivation" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case__ : Tuple = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
snake_case__ : List[str] = num_channels
snake_case__ : Tuple = embedding_size
snake_case__ : str = hidden_sizes
snake_case__ : Optional[Any] = depths
snake_case__ : List[Any] = layer_type
snake_case__ : Dict = hidden_act
snake_case__ : Union[str, Any] = global_padding
snake_case__ : List[str] = num_groups
snake_case__ : str = drop_path_rate
snake_case__ : List[Any] = embedding_dynamic_padding
snake_case__ : List[str] = output_stride
snake_case__ : Dict = width_factor
snake_case__ : List[str] = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Dict = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
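# Note (sketch): get_aligned_output_features_output_indices reconciles the two
# ways of selecting backbone outputs -- by stage name ("stem", "stage1", ...) or
# by integer index -- so both attributes end up populated and mutually consistent.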
| 38 | 1 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ : Dict = logging.get_logger(__name__)
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = ['''audio_values''', '''audio_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=2_0_4_8 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=[1_6, 1_6] , __SCREAMING_SNAKE_CASE=1_2_8 , __SCREAMING_SNAKE_CASE=4_4_1_0_0 , __SCREAMING_SNAKE_CASE=8_6 , __SCREAMING_SNAKE_CASE=2_0_4_8 , __SCREAMING_SNAKE_CASE=0.0 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
snake_case__ : Tuple = spectrogram_length
snake_case__ : List[Any] = num_channels
snake_case__ : List[str] = patch_size
snake_case__ : Dict = feature_size // self.patch_size[1]
snake_case__ : Optional[Any] = n_fft
snake_case__ : Optional[int] = sampling_rate // hop_length_to_sampling_rate
snake_case__ : Optional[int] = sampling_rate
snake_case__ : str = padding_value
snake_case__ : Tuple = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm="""slaney""" , mel_scale="""slaney""" , ).T
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
snake_case__ : List[str] = log_spec[:, :-1]
snake_case__ : Optional[Any] = log_spec - 20.0
snake_case__ : List[str] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
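        # Range note: the dB spectrogram is shifted by -20 and scaled by 1/40,
        # and the clip to [-2, 0] followed by +1 maps the result onto [-1, 1].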
return log_spec
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
f" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
snake_case__ : List[str] = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
snake_case__ : int = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case__ : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
snake_case__ : Optional[int] = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case__ : int = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case__ : Union[str, Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
snake_case__ : str = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
snake_case__ : Union[str, Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
snake_case__ : int = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
snake_case__ : List[str] = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa )
# convert into correct format for padding
snake_case__ : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
snake_case__ : Dict = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
snake_case__ : int = padded_audio_features * self.padding_value
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
snake_case__ : str = audio_features[i]
snake_case__ : Tuple = feature
# return as BatchFeature
if return_attention_mask:
snake_case__ : int = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
snake_case__ : Any = {"""audio_values""": padded_audio_features}
snake_case__ : Union[str, Any] = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
return encoded_inputs
| 38 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=False ) -> Tuple:
'''simple docstring'''
snake_case__ : int = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Tuple=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : int = """"""
else:
snake_case__ : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case__ : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : Optional[Any] = in_proj_bias[: config.hidden_size]
snake_case__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Optional[int] = in_proj_bias[-config.hidden_size :]
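        # timm stores q, k and v stacked as one (3 * hidden_size, hidden_size) matrix; the three
        # hidden_size-row slices taken above are the query, key and value projections, in that order.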
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case__ : str = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = dct.pop(__magic_name__ )
snake_case__ : Dict = val
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int=False ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=__magic_name__ , )
snake_case__ : Optional[int] = ViTHybridConfig(backbone_config=__magic_name__ , image_size=3_84 , num_labels=10_00 )
snake_case__ : Union[str, Any] = False
# load original model from timm
snake_case__ : List[Any] = timm.create_model(__magic_name__ , pretrained=__magic_name__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ : Optional[int] = timm_model.state_dict()
if base_model:
remove_classification_head_(__magic_name__ )
snake_case__ : int = create_rename_keys(__magic_name__ , __magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , __magic_name__ )
snake_case__ : str = """huggingface/label-files"""
snake_case__ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case__ : Dict = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : List[Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case__ : int = idalabel
snake_case__ : str = {v: k for k, v in idalabel.items()}
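    # id2label / label2id give the classification head human-readable ImageNet-1k class names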
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ : str = ViTHybridModel(__magic_name__ ).eval()
else:
snake_case__ : Union[str, Any] = ViTHybridForImageClassification(__magic_name__ ).eval()
model.load_state_dict(__magic_name__ )
# create image processor
snake_case__ : Optional[Any] = create_transform(**resolve_data_config({} , model=__magic_name__ ) )
snake_case__ : Union[str, Any] = transform.transforms
snake_case__ : Tuple = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case__ : Any = ViTHybridImageProcessor(
do_resize=__magic_name__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__magic_name__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__magic_name__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case__ : Any = prepare_img()
snake_case__ : int = transform(__magic_name__ ).unsqueeze(0 )
snake_case__ : List[str] = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__magic_name__ , __magic_name__ )
# verify logits
with torch.no_grad():
snake_case__ : Optional[Any] = model(__magic_name__ )
snake_case__ : Union[str, Any] = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
snake_case__ : Dict = timm_model.forward_features(__magic_name__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__magic_name__ , outputs.pooler_output , atol=1E-3 )
else:
snake_case__ : int = timm_model(__magic_name__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__magic_name__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 38 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
A_ : Optional[Any] = TypeVar("KT")
A_ : str = TypeVar("VT")
class __snake_case ( Generic[KT, VT] ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE = "root" , __SCREAMING_SNAKE_CASE = None ):
snake_case__ : Optional[int] = key
snake_case__ : List[str] = value
snake_case__ : list[Node[KT, VT]] = []
def __repr__( self ):
return f"Node({self.key}: {self.value})"
@property
def __UpperCamelCase ( self ):
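        # A node's level equals the number of forward references it holds.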
return len(self.forward )
class __snake_case ( Generic[KT, VT] ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE = 0.5 , __SCREAMING_SNAKE_CASE = 1_6 ):
snake_case__ : Node[KT, VT] = Node[KT, VT]()
snake_case__ : int = 0
snake_case__ : List[Any] = p
snake_case__ : str = max_level
def __str__( self ):
snake_case__ : List[Any] = list(self )
if len(__SCREAMING_SNAKE_CASE ) == 0:
return f"SkipList(level={self.level})"
snake_case__ : Dict = max((len(str(__SCREAMING_SNAKE_CASE ) ) for item in items) , default=4 )
snake_case__ : str = max(__SCREAMING_SNAKE_CASE , 4 ) + 4
snake_case__ : List[str] = self.head
snake_case__ : Any = []
snake_case__ : Optional[Any] = node.forward.copy()
lines.append(f"[{node.key}]".ljust(__SCREAMING_SNAKE_CASE , """-""" ) + """* """ * len(__SCREAMING_SNAKE_CASE ) )
lines.append(""" """ * label_size + """| """ * len(__SCREAMING_SNAKE_CASE ) )
while len(node.forward ) != 0:
snake_case__ : Union[str, Any] = node.forward[0]
lines.append(
f"[{node.key}]".ljust(__SCREAMING_SNAKE_CASE , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = node.forward
lines.append("""None""".ljust(__SCREAMING_SNAKE_CASE ) + """* """ * len(__SCREAMING_SNAKE_CASE ) )
return f"SkipList(level={self.level})\n" + "\n".join(__SCREAMING_SNAKE_CASE )
def __iter__( self ):
snake_case__ : Tuple = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case__ : Union[str, Any] = node.forward[0]
def __UpperCamelCase ( self ):
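        # Draw a level from a geometric distribution: each additional level is
        # kept with probability p, capped at max_level.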
snake_case__ : Any = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = []
snake_case__ : Union[str, Any] = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
snake_case__ : Optional[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__SCREAMING_SNAKE_CASE )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ , snake_case__ : Optional[int] = self._locate_node(__SCREAMING_SNAKE_CASE )
if node is not None:
for i, update_node in enumerate(__SCREAMING_SNAKE_CASE ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case__ : List[str] = node.forward[i]
else:
snake_case__ : Tuple = update_node.forward[:i]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ , snake_case__ : List[Any] = self._locate_node(__SCREAMING_SNAKE_CASE )
if node is not None:
snake_case__ : str = value
else:
snake_case__ : Optional[int] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __SCREAMING_SNAKE_CASE ):
update_vector.append(self.head )
snake_case__ : Union[str, Any] = level
snake_case__ : Union[str, Any] = Node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Dict = new_node
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ , snake_case__ : Optional[Any] = self._locate_node(__SCREAMING_SNAKE_CASE )
if node is not None:
return node.value
return None
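    # With the default p = 0.5 the expected height of the list is O(log n), so
    # search, insert and delete all run in O(log n) expected time.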
def UpperCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 12 )
skip_list.insert("""Key3""" , 41 )
skip_list.insert("""Key4""" , -19 )
snake_case__ : List[Any] = skip_list.head
snake_case__ : int = {}
while node.level != 0:
snake_case__ : int = node.forward[0]
snake_case__ : Union[str, Any] = node.value
assert len(__magic_name__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def UpperCamelCase__ ( ) -> int:
'''simple docstring'''
snake_case__ : Any = SkipList()
skip_list.insert("""Key1""" , 10 )
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 10 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 10 )
snake_case__ : Optional[Any] = skip_list.head
snake_case__ : str = {}
while node.level != 0:
snake_case__ : List[Any] = node.forward[0]
snake_case__ : Optional[int] = node.value
assert len(__magic_name__ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def UpperCamelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Optional[int] = SkipList()
assert skip_list.find("""Some key""" ) is None
def UpperCamelCase__ ( ) -> Any:
'''simple docstring'''
snake_case__ : Optional[Any] = SkipList()
skip_list.insert("""Key2""" , 20 )
assert skip_list.find("""Key2""" ) == 20
skip_list.insert("""Some Key""" , 10 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 13 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 10
assert skip_list.find("""V""" ) == 13
def UpperCamelCase__ ( ) -> int:
'''simple docstring'''
snake_case__ : int = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def UpperCamelCase__ ( ) -> List[str]:
'''simple docstring'''
snake_case__ : Any = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def UpperCamelCase__ ( ) -> List[Any]:
'''simple docstring'''
snake_case__ : Dict = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 14
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def UpperCamelCase__ ( ) -> List[Any]:
'''simple docstring'''
snake_case__ : int = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 1_42 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""X""" )
    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def UpperCamelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
snake_case__ : Optional[Any] = SkipList()
for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def UpperCamelCase__ ( ) -> int:
'''simple docstring'''
for _ in range(1_00 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Any = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 38 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE=True , ):
super().__init__()
snake_case__ : str = layers_per_block
snake_case__ : int = torch.nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : List[Any] = None
snake_case__ : List[Any] = nn.ModuleList([] )
# down
snake_case__ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = output_channel
snake_case__ : Union[str, Any] = block_out_channels[i]
snake_case__ : int = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : str = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
snake_case__ : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# out
snake_case__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : str = 2 * out_channels if double_z else out_channels
snake_case__ : int = nn.Convad(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : Union[str, Any] = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = x
snake_case__ : int = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
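            # torch.utils.checkpoint expects a plain callable, so each module is
            # wrapped in this closure before being checkpointed.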
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
snake_case__ : List[str] = down_block(__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
snake_case__ : Any = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE="group" , ):
super().__init__()
snake_case__ : Any = layers_per_block
snake_case__ : Optional[Any] = nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : Optional[int] = in_channels if norm_type == """spatial""" else None
# mid
snake_case__ : Tuple = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# up
snake_case__ : List[Any] = list(reversed(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = output_channel
snake_case__ : Optional[Any] = reversed_block_out_channels[i]
snake_case__ : List[str] = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : int = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : int = output_channel
# out
if norm_type == "spatial":
snake_case__ : List[Any] = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
else:
snake_case__ : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : Union[str, Any] = nn.Convad(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : int = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Union[str, Any] = z
snake_case__ : Any = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
snake_case__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
snake_case__ : int = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : List[Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : Dict = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
snake_case__ : Optional[Any] = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="random" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ):
super().__init__()
snake_case__ : int = n_e
snake_case__ : Optional[int] = vq_embed_dim
snake_case__ : int = beta
snake_case__ : Optional[int] = legacy
snake_case__ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case__ : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
snake_case__ : Optional[Any] = self.used.shape[0]
snake_case__ : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case__ : Dict = self.re_embed
snake_case__ : List[str] = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
snake_case__ : Union[str, Any] = n_e
snake_case__ : str = sane_index_shape
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : Dict = inds.reshape(ishape[0] , -1 )
snake_case__ : Any = self.used.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
snake_case__ : List[Any] = match.argmax(-1 )
snake_case__ : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case__ : Optional[Any] = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : int = inds.reshape(ishape[0] , -1 )
snake_case__ : Optional[int] = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
snake_case__ : List[Any] = 0 # simply set to zero
snake_case__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
# reshape z -> (batch, height, width, channel) and flatten
snake_case__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
        # distances from z to embedding vectors e_j: torch.cdist computes ||z - e_j|| directly
        # (equivalent to expanding (z - e)^2 = z^2 + e^2 - 2 e * z)
snake_case__ : Dict = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
snake_case__ : Union[str, Any] = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
snake_case__ : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
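        # (straight-through estimator: the forward pass uses z_q while gradients flow back through z)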
snake_case__ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case__ : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case__ : str = self.remap_to_used(__SCREAMING_SNAKE_CASE )
snake_case__ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case__ : Optional[int] = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case__ : int = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
snake_case__ : str = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
snake_case__ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
snake_case__ : Tuple = parameters
snake_case__ , snake_case__ : Any = torch.chunk(__SCREAMING_SNAKE_CASE , 2 , dim=1 )
snake_case__ : Union[str, Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
snake_case__ : Optional[int] = deterministic
snake_case__ : Optional[int] = torch.exp(0.5 * self.logvar )
snake_case__ : Any = torch.exp(self.logvar )
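        # std = exp(logvar / 2) and var = exp(logvar); logvar was clamped above for numerical stability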
if self.deterministic:
snake_case__ : List[str] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None ):
# make sure sample is on the same device as the parameters and has same dtype
snake_case__ : Dict = randn_tensor(
self.mean.shape , generator=__SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
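        # reparameterization trick: mean + std * eps keeps the sample differentiable
        # with respect to the distribution parameters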
snake_case__ : Optional[int] = self.mean + self.std * sample
return x
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
snake_case__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.mean
| 38 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : str = tempfile.mkdtemp()
snake_case__ : str = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
snake_case__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
snake_case__ : List[Any] = {
"""do_resize""": True,
"""size""": {"""height""": 2_2_4, """width""": 2_2_4},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 1_8, """width""": 1_8},
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
"""do_convert_rgb""": True,
}
snake_case__ : Optional[int] = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return BertTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
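        # Build a list with a single random RGB PIL image so that the processor and
        # the bare image processor receive identical inputs.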
snake_case__ : Dict = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
snake_case__ : Optional[Any] = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : Optional[Any] = self.get_rust_tokenizer()
snake_case__ : Dict = self.get_image_processor()
snake_case__ : int = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
snake_case__ : Optional[int] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
snake_case__ : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Any = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Any = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
snake_case__ : int = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=__SCREAMING_SNAKE_CASE )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = self.get_image_processor()
snake_case__ : Tuple = self.get_tokenizer()
snake_case__ : Optional[Any] = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = self.prepare_image_inputs()
snake_case__ : List[str] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
snake_case__ : Optional[Any] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_image_processor()
snake_case__ : Any = self.get_tokenizer()
snake_case__ : Tuple = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """Alexandra,T-shirt的价格是15便士。"""
snake_case__ : List[Any] = processor(text=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : Tuple = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Alexandra,T-shirt的价格是15便士。"""
snake_case__ : Union[str, Any] = self.prepare_image_inputs()
snake_case__ : Tuple = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : Optional[int] = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : Union[str, Any] = processor.batch_decode(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : str = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Alexandra,T-shirt的价格是15便士。"""
snake_case__ : List[str] = self.prepare_image_inputs()
snake_case__ : Tuple = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 38 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1, 3_8_4, 2_4, 2_4] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : str = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : Optional[int] = patch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : str = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : str = backbone_out_indices
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : Optional[int] = num_labels
snake_case__ : str = backbone_featmap_shape
snake_case__ : List[Any] = scope
snake_case__ : Optional[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
snake_case__ : List[Any] = (image_size // patch_size) ** 2
snake_case__ : Union[str, Any] = num_patches + 1
def __UpperCamelCase ( self ):
snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : str = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
snake_case__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [9_6, 1_9_2, 3_8_4, 7_6_8],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = DPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : Dict = DPTForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = DPTModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = False
snake_case__ : str = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
snake_case__ : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case__ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case__ : List[str] = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = """add"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
snake_case__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = outputs.predicted_depth
# verify the predicted depth
snake_case__ : Any = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 38 | 1 |
'''simple docstring'''
from math import pi
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : int ) -> float:
    """Return the length of the arc subtended by ``angle`` degrees on a circle with the given radius."""
return 2 * pi * radius * (angle / 3_60)
if __name__ == "__main__":
print(arc_length(90, 10))
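    # e.g. arc_length(90, 10) -> 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.7079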
| 38 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case__ : int = botoa.client("""iam""" )
snake_case__ : Union[str, Any] = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__magic_name__ , AssumeRolePolicyDocument=json.dumps(__magic_name__ , indent=2 ) )
snake_case__ : Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__magic_name__ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(__magic_name__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def UpperCamelCase__ ( __magic_name__ : Any ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = botoa.client("""iam""" )
return iam_client.get_role(RoleName=__magic_name__ )["Role"]["Arn"]
def UpperCamelCase__ ( ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , __magic_name__ , )
snake_case__ : List[Any] = None
if credentials_configuration == 0:
snake_case__ : Dict = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
snake_case__ : List[str] = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
snake_case__ : List[str] = _ask_field("""AWS Access Key ID: """ )
snake_case__ : int = aws_access_key_id
snake_case__ : Optional[Any] = _ask_field("""AWS Secret Access Key: """ )
snake_case__ : List[str] = aws_secret_access_key
snake_case__ : Tuple = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
snake_case__ : Optional[int] = aws_region
snake_case__ : int = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , __magic_name__ , )
if role_management == 0:
snake_case__ : Optional[Any] = _ask_field("""Enter your IAM role name: """ )
else:
snake_case__ : Optional[int] = """accelerate_sagemaker_execution_role"""
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(__magic_name__ )
snake_case__ : Dict = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Any = None
if is_custom_docker_image:
snake_case__ : str = _ask_field("""Enter your Docker image: """ , lambda __magic_name__ : str(__magic_name__ ).lower() )
snake_case__ : Tuple = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : List[Any] = None
if is_sagemaker_inputs_enabled:
snake_case__ : str = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Optional[int] = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Optional[Any] = None
if is_sagemaker_metrics_enabled:
snake_case__ : List[Any] = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Tuple = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
snake_case__ : Any = {}
snake_case__ : List[Any] = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
snake_case__ : str = """dynamo_"""
snake_case__ : Tuple = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
snake_case__ : List[str] = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
snake_case__ : str = _ask_options(
"""Which mode do you want to use?""" , __magic_name__ , lambda __magic_name__ : TORCH_DYNAMO_MODES[int(__magic_name__ )] , default="""default""" , )
snake_case__ : Union[str, Any] = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : str = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Dict = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
snake_case__ : List[str] = _ask_options(
__magic_name__ , __magic_name__ , lambda __magic_name__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__magic_name__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
snake_case__ : Optional[int] = _ask_field(__magic_name__ , lambda __magic_name__ : str(__magic_name__ ).lower() , default="""ml.p3.2xlarge""" )
snake_case__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
snake_case__ : Optional[Any] = _ask_field(
"""How many machines do you want use? [1]: """ , __magic_name__ , default=1 , )
snake_case__ : Union[str, Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=__magic_name__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__magic_name__ , use_cpu=__magic_name__ , dynamo_config=__magic_name__ , eca_instance_type=__magic_name__ , profile=__magic_name__ , region=__magic_name__ , iam_role_name=__magic_name__ , mixed_precision=__magic_name__ , num_machines=__magic_name__ , sagemaker_inputs_file=__magic_name__ , sagemaker_metrics_file=__magic_name__ , )
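# For reference, a sketch of the dynamo_config dict assembled above when dynamo is
# enabled. The key names follow the "dynamo_" prefix set in this function; the
# concrete values are illustrative assumptions, not output of this script:
#   {
#       "dynamo_backend": "inductor",
#       "dynamo_mode": "default",
#       "dynamo_use_fullgraph": False,
#       "dynamo_use_dynamic": False,
#   }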
| 38 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
A_ : Dict = logging.get_logger(__name__)
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
warnings.warn(
"""The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use GLPNImageProcessor instead.""" , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 38 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase__ ( __magic_name__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
snake_case__ : Union[str, Any] = f"https://www.amazon.in/laptop/s?k={product}"
snake_case__ : List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
    snake_case__ : int = BeautifulSoup(requests.get(__magic_name__ , headers=__magic_name__ ).text , """html.parser""" )
# Initialize a Pandas dataframe with the column titles
snake_case__ : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
            snake_case__ : Optional[int] = item.h2.text
            snake_case__ : Any = """https://www.amazon.in/""" + item.h2.a["""href"""]
snake_case__ : List[str] = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
snake_case__ : Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
snake_case__ : Optional[int] = """Not available"""
try:
snake_case__ : Tuple = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
snake_case__ : Optional[Any] = """"""
try:
snake_case__ : str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_00 )
except ValueError:
snake_case__ : List[Any] = float("""nan""" )
except AttributeError:
pass
snake_case__ : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case__ : List[Any] = """ """
snake_case__ : Union[str, Any] = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
A_ : int = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
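# Worked example of the discount arithmetic above, on a hypothetical listing
# (the rupee amounts are illustrative, not scraped data):
#   product_mrp = "₹1,000" and product_price = "₹750" give
#   discount = (1000.0 - 750.0) / 1000.0 * 100 = 25.0  # percent off the MRP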
| 38 | 1 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
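# The two helpers below build synthetic linear-regression data (y = a * x + b + noise)
# and a minimal training loop. DummyModel further down holds exactly the two scalar
# parameters (a, b) needed to fit this data, which keeps the checkpointing tests fast.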
def UpperCamelCase__ ( __magic_name__ : List[str]=2 , __magic_name__ : Tuple=3 , __magic_name__ : Tuple=16 , __magic_name__ : int = 10 , __magic_name__ : int = 2 ) -> Optional[Any]:
'''simple docstring'''
def get_dataset(__magic_name__ : List[str] ):
snake_case__ : int = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__magic_name__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
snake_case__ : Optional[Any] = get_dataset(__magic_name__ )
snake_case__ : Union[str, Any] = get_dataset(__magic_name__ )
snake_case__ : int = DataLoader(__magic_name__ , shuffle=__magic_name__ , batch_size=__magic_name__ , num_workers=4 )
snake_case__ : Optional[int] = DataLoader(__magic_name__ , shuffle=__magic_name__ , batch_size=__magic_name__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCamelCase__ ( __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Optional[int]=None ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = []
for epoch in range(__magic_name__ ):
# Train quickly
model.train()
for batch in dataloader:
snake_case__ , snake_case__ : Tuple = batch
snake_case__ : int = model(__magic_name__ )
snake_case__ : Union[str, Any] = torch.nn.functional.mse_loss(__magic_name__ , __magic_name__ )
accelerator.backward(__magic_name__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
snake_case__ : int = nn.Parameter(torch.randn(1 ) )
snake_case__ : Tuple = nn.Parameter(torch.randn(1 ) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return x * self.a + self.b
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : Optional[int] = DummyModel()
snake_case__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
snake_case__ , snake_case__ : Dict = dummy_dataloaders()
snake_case__ : List[str] = ProjectConfiguration(total_limit=1 , project_dir=__SCREAMING_SNAKE_CASE , automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
# Train baseline
snake_case__ : Any = Accelerator(project_config=__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : Any = DummyModel()
snake_case__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
snake_case__ , snake_case__ : Optional[Any] = dummy_dataloaders()
# Train baseline
snake_case__ : Union[str, Any] = Accelerator()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save initial
snake_case__ : Dict = os.path.join(__SCREAMING_SNAKE_CASE , """initial""" )
accelerator.save_state(__SCREAMING_SNAKE_CASE )
((snake_case__) , (snake_case__)) : List[str] = model.a.item(), model.b.item()
snake_case__ : List[Any] = optimizer.state_dict()
snake_case__ : Optional[Any] = train(3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
((snake_case__) , (snake_case__)) : str = model.a.item(), model.b.item()
snake_case__ : Dict = optimizer.state_dict()
# Train partially
set_seed(4_2 )
snake_case__ : Dict = DummyModel()
snake_case__ : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
snake_case__ , snake_case__ : Optional[Any] = dummy_dataloaders()
snake_case__ : Optional[Any] = Accelerator()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Any = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
accelerator.load_state(__SCREAMING_SNAKE_CASE )
((snake_case__) , (snake_case__)) : Any = model.a.item(), model.b.item()
snake_case__ : List[str] = optimizer.state_dict()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = train(2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save everything
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , """checkpoint""" )
accelerator.save_state(__SCREAMING_SNAKE_CASE )
# Load everything back in and make sure all states work
accelerator.load_state(__SCREAMING_SNAKE_CASE )
test_rands += train(1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
((snake_case__) , (snake_case__)) : Union[str, Any] = model.a.item(), model.b.item()
snake_case__ : str = optimizer.state_dict()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : Union[str, Any] = DummyModel()
snake_case__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
snake_case__ , snake_case__ : Any = dummy_dataloaders()
snake_case__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
# Train baseline
snake_case__ : Optional[Any] = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
((snake_case__) , (snake_case__)) : Union[str, Any] = model.a.item(), model.b.item()
snake_case__ : List[str] = optimizer.state_dict()
snake_case__ : Any = train(3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
((snake_case__) , (snake_case__)) : Optional[int] = model.a.item(), model.b.item()
snake_case__ : int = optimizer.state_dict()
# Train partially
set_seed(4_2 )
snake_case__ : Any = DummyModel()
snake_case__ : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
snake_case__ , snake_case__ : Union[str, Any] = dummy_dataloaders()
snake_case__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) )
((snake_case__) , (snake_case__)) : Any = model.a.item(), model.b.item()
snake_case__ : List[str] = optimizer.state_dict()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = train(2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
((snake_case__) , (snake_case__)) : Tuple = model.a.item(), model.b.item()
snake_case__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = torch.tensor([1, 2, 3] )
snake_case__ : List[Any] = torch.tensor([2, 3, 4] )
snake_case__ : Union[str, Any] = DummyModel()
snake_case__ : Any = torch.optim.Adam(net.parameters() )
snake_case__ : Union[str, Any] = Accelerator()
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as ve:
accelerator.register_for_checkpointing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : List[str] = DummyModel()
snake_case__ : Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
snake_case__ : Union[str, Any] = torch.optim.lr_scheduler.StepLR(__SCREAMING_SNAKE_CASE , step_size=1 , gamma=0.99 )
snake_case__ , snake_case__ : Dict = dummy_dataloaders()
snake_case__ : List[str] = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
# Train baseline
snake_case__ : Optional[Any] = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : Tuple = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
snake_case__ : Any = scheduler.state_dict()
train(3 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotEqual(__SCREAMING_SNAKE_CASE , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(__SCREAMING_SNAKE_CASE , scheduler.state_dict() )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : str = DummyModel()
snake_case__ : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE , total_limit=2 )
# Train baseline
snake_case__ : Any = Accelerator(project_dir=__SCREAMING_SNAKE_CASE , project_config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = accelerator.prepare(__SCREAMING_SNAKE_CASE )
            # Save 11 states; with total_limit=2, only the two most recent checkpoints should survive:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __UpperCamelCase ( self ):
snake_case__ : Tuple = ["""torchrun""", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy() )
if __name__ == "__main__":
A_ : List[str] = "/tmp/accelerate/state_checkpointing"
A_ : List[str] = DummyModel()
A_ : List[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
A_ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A_ , A_ : List[str] = dummy_dataloaders()
A_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A_ : Tuple = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A_ , A_ , A_ , A_ , A_ : Optional[int] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A_ , A_ : Any = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A_ : Optional[int] = group["params"][0].device
break
assert param_device.type == accelerator.device.type
A_ : List[str] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
A_ : List[Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
A_ : Optional[int] = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 38 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LongformerTokenizer
lowerCamelCase__ = True
lowerCamelCase__ = LongformerTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Optional[int] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = """lower newer"""
snake_case__ : Dict = """lower newer"""
return input_text, output_text
def __UpperCamelCase ( self ):
snake_case__ : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = """lower newer"""
snake_case__ : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokens + [tokenizer.unk_token]
snake_case__ : List[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
snake_case__ : int = tokenizer.encode("""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = """Encode this sequence."""
snake_case__ : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
snake_case__ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case__ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : str = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : str = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __UpperCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""trim_offsets"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : Any = f"{text_of_1_token} {text_of_1_token}"
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
| 38 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = num_of_nodes
snake_case__ : list[list[int]] = []
snake_case__ : dict[int, int] = {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.m_edges.append([u_node, v_node, weight] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
snake_case__ : Union[str, Any] = self.find_component(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if component_size[u_node] <= component_size[v_node]:
snake_case__ : List[str] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(__SCREAMING_SNAKE_CASE )
elif component_size[u_node] >= component_size[v_node]:
snake_case__ : Optional[Any] = self.find_component(__SCREAMING_SNAKE_CASE )
component_size[u_node] += component_size[v_node]
self.set_component(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = []
snake_case__ : Tuple = 0
snake_case__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
snake_case__ : Any = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
snake_case__ , snake_case__ , snake_case__ : List[Any] = edge
snake_case__ : Dict = self.m_component[u]
snake_case__ : Any = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
snake_case__ : Optional[int] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ , snake_case__ , snake_case__ : int = edge
snake_case__ : List[Any] = self.m_component[u]
snake_case__ : Any = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
snake_case__ : str = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
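# Minimal usage sketch for the Borůvka MST class above. The obfuscation does not
# preserve method names, so Graph/add_edge/boruvka below follow the upstream
# implementation and are assumptions here:
#   g = Graph(4)
#   g.add_edge(0, 1, 10)
#   g.add_edge(0, 2, 6)
#   g.add_edge(0, 3, 5)
#   g.add_edge(1, 3, 15)
#   g.add_edge(2, 3, 4)
#   g.boruvka()  # selects edges 2-3 (4), 0-3 (5) and 0-1 (10); total MST weight 19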
| 38 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''resnet'''
lowerCamelCase__ = ['''basic''', '''bottleneck''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="bottleneck" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
snake_case__ : List[Any] = num_channels
snake_case__ : str = embedding_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : Dict = depths
snake_case__ : List[Any] = layer_type
snake_case__ : int = hidden_act
snake_case__ : Union[str, Any] = downsample_in_first_stage
snake_case__ : Dict = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-3
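# Minimal usage sketch. The class name ResNetConfig follows the upstream library
# and is an assumption here, since the obfuscation above renames it:
#   config = ResNetConfig()  # defaults give the ResNet-50 bottleneck layout
#   config = ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic")  # ResNet-18-like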
| 38 | 1 |
'''simple docstring'''
A_ : Union[str, Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A_ : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
A_ : Tuple = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> str:
'''simple docstring'''
assert len(str(__magic_name__ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
snake_case__ : Optional[Any] = year // 1_00
snake_case__ : int = (5 * (century % 4) + 2) % 7
snake_case__ : Optional[Any] = year % 1_00
snake_case__ : List[str] = centurian % 12
snake_case__ : Tuple = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
snake_case__ : List[Any] = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
snake_case__ : Optional[Any] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
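# Worked trace of the Doomsday computation above for 2020-10-24, which should
# resolve to "Saturday" (the array names follow the upstream implementation):
#   century = 2020 // 100 = 20 -> century_anchor = (5 * (20 % 4) + 2) % 7 = 2
#   centurian = 20, centurian_m = 20 % 12 = 8
#   dooms_day = (20 // 12 + 8 + 8 // 4 + 2) % 7 = 13 % 7 = 6  (Saturday)
#   2020 is a leap year, so October's day anchor is DOOMSDAY_LEAP[9] = 3
#   week_day = (6 + 24 - 3) % 7 = 6 -> WEEK_DAY_NAMES[6] == "Saturday"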
| 38 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 38 | 1 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
A_ : Tuple = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
A_ : List[Any] = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Any ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = SavedModel()
snake_case__ : List[str] = []
with open(os.path.join(__magic_name__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
snake_case__ : Union[str, Any] = json.load(__magic_name__ )["""opsets"""]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__magic_name__ )] )
with open(__magic_name__ , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
snake_case__ : Any = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
    # Convert the set to a sorted list
snake_case__ : Any = sorted(__magic_name__ )
snake_case__ : int = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__magic_name__ )
if strict and len(__magic_name__ ) > 0:
raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + incompatible_ops )
elif len(__magic_name__ ) > 0:
print(f"Found the following incompatible ops for the opset {opset}:" )
print(*__magic_name__ , sep="""\n""" )
else:
print(f"The saved model {saved_model_path} can properly be converted with ONNX." )
if __name__ == "__main__":
A_ : int = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
A_ : Optional[int] = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
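# Example invocation, using the CLI flags defined above (the model path is
# illustrative):
#   python utils/check_tf_ops.py --saved_model_path saved_model/model.pb --opset 12 --strict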
| 38 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
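# The callback class below (referenced later as MyTestTrainerCallback) simply
# records the name of every event it receives, so the tests can compare the
# recorded sequence against the event order the Trainer is expected to fire.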
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ):
snake_case__ : str = []
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_init_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_evaluate""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_predict""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_save""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_log""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_prediction_step""" )
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Tuple = tempfile.mkdtemp()
def __UpperCamelCase ( self ):
shutil.rmtree(self.output_dir )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
snake_case__ : List[Any] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionModelConfig(a=__SCREAMING_SNAKE_CASE , b=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionPreTrainedModel(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = TrainingArguments(self.output_dir , disable_tqdm=__SCREAMING_SNAKE_CASE , report_to=[] , **__SCREAMING_SNAKE_CASE )
return Trainer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , train_dataset=__SCREAMING_SNAKE_CASE , eval_dataset=__SCREAMING_SNAKE_CASE , callbacks=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
# Order doesn't matter
snake_case__ : Tuple = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
snake_case__ : List[str] = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
for cba, cba in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , cba.__class__ )
elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(cba.__class__ , __SCREAMING_SNAKE_CASE )
else:
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = ["""on_init_end""", """on_train_begin"""]
snake_case__ : Union[str, Any] = 0
snake_case__ : Dict = len(trainer.get_eval_dataloader() )
snake_case__ : Any = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(__SCREAMING_SNAKE_CASE ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_trainer()
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# Callbacks passed at init are added to the default callbacks
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
snake_case__ : Optional[Any] = self.get_trainer(disable_tqdm=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ : int = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.get_trainer()
snake_case__ : List[str] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(cb.__class__ , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# We can also add, pop, or remove by instance
snake_case__ : List[Any] = self.get_trainer()
snake_case__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.get_trainer()
snake_case__ : Any = trainer.callback_handler.callbacks[0]
snake_case__ : Optional[Any] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
snake_case__ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# A bit of everything
snake_case__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
snake_case__ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case__ : List[str] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
| 38 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="""utf-8""" , check=__SCREAMING_SNAKE_CASE , )
assert hasattr(self , """env""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=1 ):
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-single" , instance_count=__SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=__SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
        TrainingJobAnalytics(__SCREAMING_SNAKE_CASE ).export_csv(f"{self.env.test_path}/{__SCREAMING_SNAKE_CASE}_metrics.csv" )
def __UpperCamelCase ( self ):
# create estimator
snake_case__ : Optional[int] = self.create_estimator()
# run training
estimator.fit()
# result dataframe
snake_case__ : int = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case__ : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
snake_case__ : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get the train time from the SageMaker job; this includes starting, preprocessing and stopping
snake_case__ : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
        # dump test results into a json file to share in the PR
with open(f"{estimator.latest_training_job.name}.json" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __SCREAMING_SNAKE_CASE )
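# --- illustrative note (pandas filtering pattern used above; values hypothetical) ---
# TrainingJobAnalytics(...).dataframe() returns rows of (timestamp, metric_name,
# value), so the list(...) calls above are the usual boolean-mask idiom:
#   result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"].tolist()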
| 38 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
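# --- usage sketch (assumes the vision extras are installed; the size dict
# mirrors the tester defaults above, and the input image is synthetic) ---
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 18, "width": 18})
batch = processor(images=Image.new("RGB", (30, 30)), return_tensors="pt")
# batch.pixel_values has shape (1, 3, 18, 18), matching the assertions above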
| 38 | 1 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
A_ : List[Any] = logging.get_logger(__name__)
A_ : Optional[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
A_ : List[Any] = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Dict ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split(""".""" ):
snake_case__ : Optional[int] = getattr(__magic_name__ , __magic_name__ )
if weight_type is not None:
snake_case__ : Optional[int] = getattr(__magic_name__ , __magic_name__ ).shape
else:
snake_case__ : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
snake_case__ : List[str] = value
elif weight_type == "weight_g":
snake_case__ : Dict = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : List[Any] = value
else:
snake_case__ : Optional[Any] = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : List[Any] ) -> str:
'''simple docstring'''
snake_case__ : List[str] = []
snake_case__ : Any = fairseq_model.state_dict()
snake_case__ : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == """group""" , )
snake_case__ : Dict = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case__ : List[str] = True
if "*" in mapped_key:
                        snake_case__ : Union[str, Any] = name.split(key )[0].split(""".""" )[-2]
snake_case__ : List[str] = mapped_key.replace("""*""" , __magic_name__ )
if "weight_g" in name:
snake_case__ : Any = """weight_g"""
elif "weight_v" in name:
snake_case__ : int = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
snake_case__ : Dict = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[int] = """weight"""
else:
snake_case__ : Tuple = None
set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
continue
if not is_used:
unused_weights.append(__magic_name__ )
logger.warning(f"Unused weights: {unused_weights}" )
def UpperCamelCase__ ( __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : Tuple ) -> Dict:
'''simple docstring'''
snake_case__ : int = full_name.split("""conv_layers.""" )[-1]
snake_case__ : str = name.split(""".""" )
snake_case__ : Union[str, Any] = int(items[0] )
snake_case__ : int = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
snake_case__ : Dict = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
snake_case__ : Optional[int] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
snake_case__ : Any = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
snake_case__ : Dict = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__magic_name__ )
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any]=None ) -> int:
'''simple docstring'''
snake_case__ : Union[str, Any] = torch.load(__magic_name__ )
snake_case__ : str = WavLMConfigOrig(checkpoint["""cfg"""] )
snake_case__ : Tuple = WavLMOrig(__magic_name__ )
model.load_state_dict(checkpoint["""model"""] )
model.eval()
if config_path is not None:
snake_case__ : Union[str, Any] = WavLMConfig.from_pretrained(__magic_name__ )
else:
snake_case__ : Tuple = WavLMConfig()
snake_case__ : str = WavLMModel(__magic_name__ )
recursively_load_weights(__magic_name__ , __magic_name__ )
hf_wavlm.save_pretrained(__magic_name__ )
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A_ : Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
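# --- usage sketch (all paths are placeholders; the script name is whatever this
# file is saved as) ---
# python convert_wavlm_checkpoint.py \
#     --checkpoint_path ./WavLM-Base.pt \
#     --pytorch_dump_folder_path ./wavlm-base-converted \
#     --config_path ./config.json   # optional; omitting it builds a default WavLMConfig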
| 38 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : List[Any] = patch_sizes
snake_case__ : Optional[int] = patch_stride
snake_case__ : Optional[Any] = patch_padding
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : Dict = num_labels
snake_case__ : Optional[Any] = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : Optional[int] = num_heads
snake_case__ : Optional[int] = stride_kv
snake_case__ : int = depth
snake_case__ : Optional[Any] = cls_token
snake_case__ : List[Any] = attention_drop_rate
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFCvtModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = (self.image_size, self.image_size)
snake_case__ , snake_case__ : str = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : str = TFCvtForImageClassification(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtModelTester(self )
snake_case__ : Any = TFCvtConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__SCREAMING_SNAKE_CASE )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] = outputs.hidden_states
snake_case__ : Tuple = len(self.model_tester.depth )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFCvtModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : int = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
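# --- worked example of the patch-embedding size formula used in the tester above ---
# floor((H + 2*padding - kernel) / stride + 1): with the first-stage defaults
# (image_size=64, patch_size=7, stride=4, padding=2) this gives 64 // 4 = 16,
# which is exactly the spatial size the hidden-state shape check expects.
from math import floor

assert floor((64 + 2 * 2 - 7) / 4 + 1) == 16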
| 38 | 1 |
'''simple docstring'''
import numpy as np
A_ : str = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class __snake_case :
'''simple docstring'''
def __init__( self ):
        snake_case__ : Dict = np.array(A_ )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ , snake_case__ : Optional[Any] = np.where(letter == self.SQUARE )
snake_case__ : Optional[Any] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = message.lower()
snake_case__ : Dict = message.replace(""" """ , """""" )
snake_case__ : str = message.replace("""j""" , """i""" )
snake_case__ : Optional[int] = np.empty((2, len(__SCREAMING_SNAKE_CASE )) )
for letter_index in range(len(__SCREAMING_SNAKE_CASE ) ):
snake_case__ : Any = self.letter_to_numbers(message[letter_index] )
snake_case__ : Union[str, Any] = numbers[0]
snake_case__ : Any = numbers[1]
snake_case__ : int = first_step.reshape(2 * len(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] = """"""
for numbers_index in range(len(__SCREAMING_SNAKE_CASE ) ):
snake_case__ : Any = int(second_step[numbers_index * 2] )
snake_case__ : Optional[Any] = int(second_step[(numbers_index * 2) + 1] )
snake_case__ : Any = self.numbers_to_letter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = encoded_message + letter
return encoded_message
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = message.lower()
        snake_case__ : Dict = message.replace(""" """ , """""" )
snake_case__ : str = np.empty(2 * len(__SCREAMING_SNAKE_CASE ) )
for letter_index in range(len(__SCREAMING_SNAKE_CASE ) ):
snake_case__ : List[str] = self.letter_to_numbers(message[letter_index] )
snake_case__ : Dict = numbers[0]
snake_case__ : Any = numbers[1]
snake_case__ : Union[str, Any] = first_step.reshape((2, len(__SCREAMING_SNAKE_CASE )) )
snake_case__ : Optional[int] = """"""
for numbers_index in range(len(__SCREAMING_SNAKE_CASE ) ):
snake_case__ : int = int(second_step[0, numbers_index] )
snake_case__ : str = int(second_step[1, numbers_index] )
snake_case__ : Optional[Any] = self.numbers_to_letter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = decoded_message + letter
return decoded_message
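# --- illustrative sketch (hypothetical helper name; a compact restatement of the
# Bifid-style fractionation the class above performs over the 5x5 square) ---
import numpy as np

def _bifid_encode(message: str) -> str:
    square = np.array(A_)  # the module-level 5x5 square defined above
    coords = [
        tuple(np.argwhere(square == ch)[0])
        for ch in message.lower().replace(" ", "").replace("j", "i")
    ]
    # fractionation: all row indices first, then all column indices, then re-pair
    flat = [r for r, _ in coords] + [c for _, c in coords]
    return "".join(square[flat[2 * i], flat[2 * i + 1]] for i in range(len(coords)))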
| 38 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
snake_case__ : int = [[1, 2, 4], [1, 2, 3, 4]]
snake_case__ : Any = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self ):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
snake_case__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [[1, 2, 3], [1, 2, 4]]
snake_case__ : Optional[int] = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : Any = dc.update(1 )
snake_case__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : Tuple = dc.update(2 )
snake_case__ : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(3 )
snake_case__ : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
snake_case__ : int = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
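# --- illustrative sketch (simplified stand-in, NOT transformers' implementation;
# it only shows the prefix-matching idea the update()/reset() tests above exercise) ---
class TinyDisjunctive:
    def __init__(self, seqs):
        self.seqs, self.current = seqs, []

    def update(self, token):
        candidate = self.current + [token]
        if any(seq[: len(candidate)] == candidate for seq in self.seqs):
            self.current = candidate
            return candidate in self.seqs  # True once any full sequence is matched
        self.current = []  # unlike the real class, simply drop all progress on mismatch
        return False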
| 38 | 1 |
'''simple docstring'''
from math import factorial
A_ : List[str] = {str(d): factorial(d) for d in range(10)}
def UpperCamelCase__ ( __magic_name__ : int ) -> int:
'''simple docstring'''
return sum(DIGIT_FACTORIAL[d] for d in str(__magic_name__ ) )
def UpperCamelCase__ ( ) -> int:
'''simple docstring'''
snake_case__ : Any = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , __magic_name__ ) if sum_of_digit_factorial(__magic_name__ ) == i )
if __name__ == "__main__":
print(F'{solution() = }')
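# --- illustrative note on the 7 * factorial(9) + 1 bound used above ---
# An n-digit number is at least 10**(n-1), while its digit-factorial sum is at
# most n * 9!. For n = 8, 10**7 = 10_000_000 > 8 * 362_880 = 2_903_040, so no
# curious number has 8 or more digits and 7 * 9! is a safe search limit.
assert 10**7 > 8 * factorial(9)  # factorial is imported at the top of this file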
| 38 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
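# --- usage sketch (the legacy `transformers.onnx` exporter consumes the config
# class above; the output directory name is a placeholder) ---
# python -m transformers.onnx --model=nvidia/segformer-b0-finetuned-ade-512-512 onnx_out/
# The properties map to: dynamic input axes (`inputs`), validation tolerance
# (1e-4), and default ONNX opset (12); the parsed "1.11" is the minimum torch version.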
| 38 | 1 |
'''simple docstring'''
import os
def UpperCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
snake_case__ : List[str] = [] # noqa: E741
for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
snake_case__ : Optional[int] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case__ : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case__ : List[Any] = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case__ : int = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case__ : List[str] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case__ : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case__ : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
snake_case__ : Optional[Any] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case__ : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
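# --- illustrative alternative (hypothetical helper; computes the same answer as
# the four hand-written scans above, given the same 20x20 grid of ints) ---
def max_adjacent_product(grid, k=4):
    best = 0
    # one loop over direction vectors replaces the right/down/diagonal blocks
    for dr, dc in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for r in range(20):
            for c in range(20):
                if 0 <= r + (k - 1) * dr < 20 and 0 <= c + (k - 1) * dc < 20:
                    p = 1
                    for step in range(k):
                        p *= grid[r + step * dr][c + step * dc]
                    best = max(best, p)
    return best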
| 38 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
                                    # skip unrelated lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda __magic_name__ : __magic_name__[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda __magic_name__ : __magic_name__[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
A_ : Any = make_github_table(reduced_by_error)
A_ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
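# --- illustrative note on the aggregation shape (values are made up) ---
# Each entry of `errors` is [error_line, error, failed_test, job_link], so e.g.
#   reduce_by_error([["l1", "OSError: x", "tests/models/bert/test_m.py::t", None]])
# yields {"OSError: x": {"count": 1, "failed_tests": [("tests/models/bert/test_m.py::t", "l1")]}}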
| 38 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=9_9 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : str = parent
snake_case__ : Dict = 1_3
snake_case__ : Any = 7
snake_case__ : Optional[int] = True
snake_case__ : Optional[Any] = True
snake_case__ : Optional[int] = True
snake_case__ : str = True
snake_case__ : Optional[Any] = 9_9
snake_case__ : Dict = 3_8_4
snake_case__ : Optional[Any] = 2
snake_case__ : List[str] = 4
snake_case__ : Union[str, Any] = 3_7
snake_case__ : Optional[Any] = """gelu"""
snake_case__ : Optional[Any] = 0.1
snake_case__ : Optional[Any] = 0.1
snake_case__ : List[Any] = 5_1_2
snake_case__ : Tuple = 1_6
snake_case__ : Tuple = 2
snake_case__ : List[str] = 0.02
snake_case__ : Tuple = 3
snake_case__ : List[str] = 4
snake_case__ : Optional[Any] = 1_2_8
snake_case__ : str = 2
snake_case__ : Union[str, Any] = 9
snake_case__ : Optional[int] = 1
snake_case__ : List[Any] = None
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : int = None
if self.use_input_mask:
snake_case__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[Any] = None
if self.use_token_type_ids:
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : List[str] = None
snake_case__ : Dict = None
snake_case__ : Tuple = None
if self.use_labels:
snake_case__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFConvBertModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case__ : int = [input_ids, input_mask]
snake_case__ : Any = model(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = TFConvBertForMaskedLM(config=__SCREAMING_SNAKE_CASE )
snake_case__ : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case__ : Tuple = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = self.num_labels
snake_case__ : int = TFConvBertForSequenceClassification(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = self.num_choices
snake_case__ : List[Any] = TFConvBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
snake_case__ : List[Any] = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
snake_case__ : List[Any] = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Tuple = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
snake_case__ : Optional[int] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = self.num_labels
snake_case__ : List[Any] = TFConvBertForTokenClassification(config=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case__ : str = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = TFConvBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case__ : Any = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
        snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = config_and_inputs
snake_case__ : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
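# --- note on the tester above (observation only; no behavioural change) ---
# The constructor appears to deliberately ignore most of its arguments and
# hard-code the shapes (batch size 13, hidden size 384, etc.), mirroring how the
# upstream TF ConvBERT test pins its dimensions; only `parent` comes from the caller.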
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Dict = TFConvBertModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
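    # Round-trips each model through a tf.keras SavedModel and checks that hidden states and attentions survive reloading.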
@slow
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = True
snake_case__ : List[Any] = True
if hasattr(__SCREAMING_SNAKE_CASE , """use_cache""" ):
snake_case__ : Optional[Any] = True
snake_case__ : str = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
snake_case__ : Dict = getattr(self.model_tester , """key_length""" , __SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Tuple = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = len(model(__SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__SCREAMING_SNAKE_CASE , saved_model=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = os.path.join(__SCREAMING_SNAKE_CASE , """saved_model""" , """1""" )
snake_case__ : List[str] = tf.keras.models.load_model(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
snake_case__ : Optional[int] = outputs["""encoder_hidden_states"""]
snake_case__ : str = outputs["""encoder_attentions"""]
else:
snake_case__ : List[str] = outputs["""hidden_states"""]
snake_case__ : Tuple = outputs["""attentions"""]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Tuple = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
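    # Exercises the output_attentions toggle (via inputs and via the config); the expected head dimension of each attention map is num_attention_heads / 2, since ConvBERT halves the self-attention heads.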
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Optional[Any] = True
snake_case__ : Dict = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
snake_case__ : Optional[Any] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
snake_case__ : List[str] = getattr(self.model_tester , """key_length""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = getattr(self.model_tester , """key_length""" , __SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(__SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = len(__SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
snake_case__ : int = outputs.decoder_attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
snake_case__ : Optional[Any] = True
snake_case__ : Dict = False
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = len(__SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(__SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case__ : str = True
snake_case__ : List[str] = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
snake_case__ : List[Any] = True
snake_case__ : Any = True
snake_case__ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
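# Integration check: runs the pretrained YituTech/conv-bert-base checkpoint and compares a 3x3 logits slice against reference values.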
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
snake_case__ : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case__ : int = model(__SCREAMING_SNAKE_CASE )[0]
snake_case__ : Any = [1, 6, 7_6_8]
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 )
| 38 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
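# Mock download manager for dataset tests: instead of downloading real data, it resolves URLs to files bundled in a local dummy_data.zip archive (or its copy hosted on GitHub).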
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[Any] = 0
snake_case__ : Union[str, Any] = dataset_name
snake_case__ : Optional[int] = cache_dir
snake_case__ : Union[str, Any] = use_local_dummy_data
snake_case__ : int = config
# download_callbacks take a single url as input
snake_case__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
snake_case__ : List[str] = None
snake_case__ : List[str] = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
        # return full path if it's a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
        # while now we expect the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
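# A minimal standalone sketch (not part of the original snippet) of the URL-to-filename
# mapping used above: only the last path component of a URL is kept, percent-encoded with
# quote_plus so query strings stay filesystem-safe. The URL below is hypothetical.
import os
import urllib.parse

_example_url = "https://example.com/data/train.csv?version=2"
_example_name = urllib.parse.quote_plus(_example_url.split("""/""")[-1])  # 'train.csv%3Fversion%3D2'
_example_path = os.path.join("""dummy_data""", _example_name)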
| 38 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Dict = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
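# Configuration for MobileNetV1 checkpoints: depth_multiplier scales the network width (and must be
# greater than zero), min_depth floors the channel counts, and tf_padding selects TensorFlow-style
# convolution padding.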
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''mobilenet_v1'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=2_2_4 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE="relu6" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.999 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.001 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
snake_case__ : Optional[Any] = num_channels
snake_case__ : Tuple = image_size
snake_case__ : str = depth_multiplier
snake_case__ : Tuple = min_depth
snake_case__ : List[Any] = hidden_act
snake_case__ : int = tf_padding
snake_case__ : Tuple = classifier_dropout_prob
snake_case__ : Optional[Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def __UpperCamelCase ( self ):
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def __UpperCamelCase ( self ):
return 1e-4
| 38 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
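    # The super-resolution pipeline fixes width/height itself, so those params are dropped; an extra original_image batch input is required instead.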
def __UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 38 | 1 |