| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 to 53.2k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 or 1) |
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield successive Fibonacci numbers indefinitely."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
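# Quick sanity check (illustrative; not part of the original solution file):
#
#     >>> solution(3)  # F(12) = 144 is the first three-digit Fibonacci number
#     12
#     >>> solution(1000)  # the published Project Euler 25 answer
#     4782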
import logging
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """Holds a conversation's state: past user inputs, generated responses, and
    the not-yet-processed new user input."""

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
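# Usage sketch (an assumption based on the public pipelines API, not part of
# this file; "microsoft/DialoGPT-small" is only an example checkpoint):
#
#     from transformers import Conversation, pipeline
#
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conversation = Conversation("Going to the movies tonight - any suggestions?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])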
def solution(limit: int = 1000000) -> int:
    """Project Euler 135: count how many n below `limit` admit exactly ten
    solutions of x**2 - y**2 - z**2 == n with x, y, z in arithmetic progression."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f'{solution() = }')
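# Why the sieve works (derivation added for clarity): write the decreasing
# arithmetic progression as x = y + d, z = y - d, so that
#     n = x**2 - y**2 - z**2 = (y + d)**2 - y**2 - (y - d)**2 = y * (4*d - y).
# Each factorisation n = first_term * (n // first_term) with first_term = y then
# gives first_term + n / first_term = 4*d, which must be divisible by 4, while
# z > 0 requires first_term > d and 4*d - first_term > 0 requires first_term < 4*d.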
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
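# Note (added for clarity): despite its name, cosine_distance above returns the
# cosine *similarity* matrix between two sets of embeddings. A minimal sketch:
#
#     import torch
#     image_embeds = torch.randn(4, 512)     # e.g. a batch of projected images
#     concept_embeds = torch.randn(17, 512)  # one row per filtered concept
#     sims = cosine_distance(image_embeds, concept_embeds)  # shape (4, 17), values in [-1, 1]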
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
        decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
        scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
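# Usage sketch (illustrative, not part of the original module): the defaults
# above fully specify the decoder, so a bare instantiation works.
#
#     config = Speech2Text2Config()
#     assert config.d_model == 256 and config.decoder_layers == 6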
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
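# Usage sketch (illustrative; the step count is arbitrary):
#
#     scheduler = FlaxKarrasVeScheduler()
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50)
#     # state.schedule now holds one squared noise level sigma(t_i)**2 per step,
#     # interpolated geometrically between sigma_min**2 and sigma_max**2.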
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Approximate the solution of y' = ode_func(x, y) with the explicit Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
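# Worked example (illustrative, not part of the original module): for y' = y
# with y(0) = 1, explicit_euler(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)[-1]
# evaluates to (1.001)**1000 ~= 2.7169, approaching e as step_size shrinks,
# which matches the first-order convergence expected of Euler's method.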
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
def upper(word: str) -> str:
    """Convert every lowercase ASCII letter in `word` to uppercase."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
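# The chr/ord arithmetic works because lowercase ASCII letters sit exactly 32
# code points above their uppercase counterparts; other characters pass through.
# Illustrative doctest:
#
#     >>> upper("hello world")
#     'HELLO WORLD'
#     >>> upper("Mixed CASE 123!")
#     'MIXED CASE 123!'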
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""CLIPFeatureExtractor"""]
_lowerCAmelCase = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def solution(limit=28123):
    """Return the sum of all positive integers <= limit that cannot be written
    as the sum of two abundant numbers (Project Euler 23)."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a) in abundants for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
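# How it works (note added for clarity): sum_divs is a proper-divisor-sum sieve,
# `abundants` grows as n increases (every abundant number up to n is already in
# the set), and n contributes only when no abundant a leaves n - a abundant too.
# With the default limit of 28123 the published Project Euler 23 answer is 4179871.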
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
__lowerCamelCase = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
__lowerCamelCase = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
__lowerCamelCase = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
__lowerCamelCase = ""
__lowerCamelCase = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
__lowerCamelCase = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__lowerCamelCase = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, expected_error)
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, expected_error)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, suppress_parsing_errors=True)
| 536 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
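    # SiLU(x) = x * sigmoid(x): it is exactly 0 at x = 0, underflows to 0 for very
    # negative inputs, and approaches the identity for large x (sigmoid(20) ~= 1),
    # which is exactly what the boundary checks in these tests rely on.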
    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 17 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its position in the alphabet (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of `encode`."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input('-> ').strip().lower())
    print('Encoded: ', encoded)
    print('Decoded:', decode(encoded))
if __name__ == "__main__":
main()
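# Illustrative round trip (assumes lowercase ASCII input only):
#   encode("marvin") -> [13, 1, 18, 22, 9, 14]
#   decode(encode("marvin")) -> "marvin"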
| 714 |
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'loss: {j} \t')  # printing the loss after every 100 iterations
    return theta
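# Hand-computed sanity check for the gradient above: with
#   x = np.array([[1.0, 2.0]]), y = np.array([1.0]), theta = np.zeros(2)
# we get z = 0, h = sigmoid(0) = 0.5 and gradient = x.T @ (h - y) / 1 = [-0.5, -1.0],
# so one update step moves theta towards classifying this point as positive.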
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print('theta: ', theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
    plt.legend()
    plt.show()
| 434 | 0 |
"""simple docstring"""
speed_chart = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609_344,
    "knot": 1.852,
}

speed_chart_inverse = {
    "km/h": 1.0,
    "m/s": 0.277_777_778,
    "mph": 0.621_371_192,
    "knot": 0.539_956_803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
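# Examples (values follow the two conversion tables above):
#   convert_speed(100, "km/h", "mph")  -> 62.137
#   convert_speed(5, "m/s", "km/h")    -> 18.0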
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 91 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
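# Note: _LazyModule resolves the entries of `_import_structure` only on first
# attribute access, so importing this package stays cheap and still works when the
# optional torch dependency is missing.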
| 695 | 0 |
'''simple docstring'''
def join(separator: str, separated: list) -> str:
    joined = ''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception('join() accepts only strings to be joined')
        joined += word_or_phrase + separator
    return joined.strip(separator)
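# Example: join("-", ["a", "b", "c"]) returns "a-b-c" (the trailing separator is stripped).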
if __name__ == "__main__":
from doctest import testmod
testmod()
| 347 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
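# e.g. check_same_shape([torch.zeros(1, 3), torch.ones(1, 3)]) -> True,
# while mixing a (1, 3) and a (2, 3) tensor would return False.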
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn='gelu',
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                'KDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift='scale_shift',
            time_embedding_type='fourier',
            timestep_post_act='gelu',
            up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D'),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
            ],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type='sample')
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act='quick_gelu',
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': model.eval(),
            'vae': vae.eval(),
            'scheduler': scheduler,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': self.dummy_image.cpu(),
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference(self):
        device = 'cpu'

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            'DDIMScheduler',
            'DDPMScheduler',
            'PNDMScheduler',
            'HeunDiscreteScheduler',
            'EulerAncestralDiscreteScheduler',
            'KDPM2DiscreteScheduler',
            'KDPM2AncestralDiscreteScheduler',
            'DPMSolverSDEScheduler',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.float16)
        pipe.to('cuda')

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.float16
        )
        upscaler.to('cuda')

        prompt = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'

        low_res_latents = pipe(prompt, generator=generator, output_type='latent').images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type='np',
        ).images[0]

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy'
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.float16
        )
        upscaler.to('cuda')

        prompt = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'

        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png'
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type='np',
        ).images[0]

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy'
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 347 | 1 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 41 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
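    # The tiny vocabulary and merges above are just large enough for the BPE
    # tokenizer to split the phrase "lower newer" (used by the tests below) into
    # known subword units.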
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 176 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Folder paths are intentionally left blank for the user to fill in.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list, anno_list, flip_type=1):
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
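# Note: the annotation rows handled above are assumed to be in YOLO format
# (class_id, x_center, y_center, width, height) with coordinates normalised to
# [0, 1]; that is why a horizontal flip only needs x_center -> 1 - x_center and a
# vertical flip only needs y_center -> 1 - y_center.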
def random_chars(number_char=32):
    assert number_char > 1, "The number of character should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 310 | 0 |
def ugly_numbers(n: int) -> int:
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
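# The first ugly numbers (2-3-5-smooth numbers) are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12,
# so, for example, ugly_numbers(10) == 12.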
if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
| 113 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
_lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
    main()
| 113 | 1 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
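# Illustrative example of the renaming performed below (hypothetical key): a fairseq
# weight named "encoder.layers.3.self_attn.k_proj.weight" matches the
# "self_attn.k_proj" entry above, the layer index "3" fills the "*" wildcard, and
# the tensor is copied to "encoder.layers.3.attention.k_proj.weight" in the
# Hugging Face model.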
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained fairseq checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 180 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = True , ):
"""simple docstring"""
__magic_name__ :List[str] = timestep
if key is None:
__magic_name__ :Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__magic_name__ , __magic_name__ :Dict = jnp.split(__lowerCAmelCase , sample.shape[1] , axis=1 )
else:
__magic_name__ :Optional[int] = None
# 1. compute alphas, betas
__magic_name__ :Any = state.common.alphas_cumprod[t]
__magic_name__ :int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__magic_name__ :Optional[int] = 1 - alpha_prod_t
__magic_name__ :Union[str, Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__magic_name__ :List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__magic_name__ :Tuple = model_output
elif self.config.prediction_type == "v_prediction":
__magic_name__ :Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
                '''or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__magic_name__ :Union[str, Any] = jnp.clip(__lowerCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__magic_name__ :Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__magic_name__ :Union[str, Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__magic_name__ :Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__magic_name__ :Tuple = jax.random.split(__lowerCAmelCase , num=1 )
__magic_name__ :Dict = jax.random.normal(__lowerCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__lowerCAmelCase , __lowerCAmelCase , predicted_variance=__lowerCAmelCase ) ** 0.5) * noise
__magic_name__ :List[str] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__magic_name__ :int = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__lowerCAmelCase , state=__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
return add_noise_common(state.common , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
return get_velocity_common(state.common , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
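# Hedged usage sketch for the scheduler above. `FlaxDDPMScheduler` is the public
# diffusers name assumed for the obfuscated class, and `unet_apply`, `sample` and
# `key` are hypothetical placeholders, not defined in this file:
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50)
#     for t in state.timesteps:
#         model_output = unet_apply(sample, t)
#         out = scheduler.step(state, model_output, t, sample, key=key)
#         sample, state = out.prev_sample, out.state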
| 180 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
'''simple docstring'''
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.02 , a__=3 , a__=4 , a__=None , ):
__SCREAMING_SNAKE_CASE : List[Any] = parent
__SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
__SCREAMING_SNAKE_CASE : int = seq_length
__SCREAMING_SNAKE_CASE : List[str] = is_training
__SCREAMING_SNAKE_CASE : Tuple = use_input_mask
__SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids
__SCREAMING_SNAKE_CASE : List[Any] = use_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
__SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_act
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
__SCREAMING_SNAKE_CASE : str = type_sequence_label_size
__SCREAMING_SNAKE_CASE : List[Any] = initializer_range
__SCREAMING_SNAKE_CASE : Tuple = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
__SCREAMING_SNAKE_CASE : List[Any] = scope
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : Tuple = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = NystromformerModel(config=a__ )
model.to(a__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(a__ , token_type_ids=a__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
__SCREAMING_SNAKE_CASE : List[Any] = NystromformerForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
__SCREAMING_SNAKE_CASE : Any = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
__SCREAMING_SNAKE_CASE : str = NystromformerForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
__SCREAMING_SNAKE_CASE : List[str] = self.num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = NystromformerForSequenceClassification(a__ )
model.to(a__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
__SCREAMING_SNAKE_CASE : Dict = self.num_labels
__SCREAMING_SNAKE_CASE : Tuple = NystromformerForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
__SCREAMING_SNAKE_CASE : int = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.num_choices
__SCREAMING_SNAKE_CASE : Optional[int] = NystromformerForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
__SCREAMING_SNAKE_CASE : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : Optional[Any] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a_ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__SCREAMING_SNAKE_CASE : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
snake_case__ : Tuple = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Tuple = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : List[Any] = False
snake_case__ : Dict = False
def a_ ( self ):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
def a_ ( self ):
self.config_tester.run_common_tests()
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__SCREAMING_SNAKE_CASE : Tuple = type
self.model_tester.create_and_check_model(*a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
@slow
def a_ ( self ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = NystromformerModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
__SCREAMING_SNAKE_CASE : List[Any] = model(a__ )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , a__ )
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1e-4 ) )
@slow
def a_ ( self ):
__SCREAMING_SNAKE_CASE : int = "the [MASK] of Belgium is Brussels"
__SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" )
__SCREAMING_SNAKE_CASE : Dict = tokenizer(a__ , return_tensors="pt" )
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(encoding.input_ids ).logits
__SCREAMING_SNAKE_CASE : str = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(a__ ) , "capital" )
| 211 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
lowercase = datasets.logging.get_logger(__name__)
lowercase = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
lowercase = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
lowercase = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        mean_score, scores = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
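    # The row-building idiom used in `_compute` above, shown stand-alone in plain
    # Python (toy data, no COMET required):
    #
    #     data = {"src": ["s1", "s2"], "mt": ["m1", "m2"], "ref": ["r1", "r2"]}
    #     rows = [dict(zip(data, t)) for t in zip(*data.values())]
    #     # rows == [{"src": "s1", "mt": "m1", "ref": "r1"},
    #     #          {"src": "s2", "mt": "m2", "ref": "r2"}]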
| 211 | 1 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    '''simple docstring'''
    require_version(deps[pkg], hint)
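# Illustrative calls for the helpers above (the package names are examples; the
# authoritative version pins live in dependency_versions_table.py):
#
#     dep_version_check("tqdm")  # silent if the installed tqdm satisfies the pin
#     require_version("numpy>=1.17", "pip install -U numpy")  # errors with the hint otherwise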
| 139 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    '''simple docstring'''
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    '''simple docstring'''
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    '''simple docstring'''
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n = 10**15):
    '''simple docstring'''
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
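# Brute-force cross-check for small n (illustrative, not part of the original
# solution): the sequence is a(1) = 1 and a(k + 1) = a(k) + digit_sum(a(k)).
def brute_force_term(n: int) -> int:
    term = 1
    for _ in range(n - 1):
        term += sum(int(digit) for digit in str(term))
    return term  # e.g. brute_force_term(10) should equal solution(10)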
| 139 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 362 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class lowercase ( BackboneConfigMixin , PretrainedConfig ):
    model_type = 'resnet'
    layer_types = ['basic', 'bottleneck']
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types)}''')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [F'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class lowercase ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''')
@property
    def inputs( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self ):
return 1e-3
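# Hedged instantiation sketch (`ResNetConfig`/`ResNetOnnxConfig` are the upstream
# transformers names assumed for the two obfuscated classes above):
#
#     config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck")
#     onnx_config = ResNetOnnxConfig(config)
#     assert onnx_config.atol_for_validation == 1e-3  # tolerance declared above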
| 362 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
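    # Note on the pattern above: `_LazyModule` resolves names through
    # `_import_structure` lazily, so e.g. importing RoCBertConfig from this
    # package only triggers the underlying module import on first attribute
    # access (a behavioral note, not new code in this file).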
| 679 |
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
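    # Illustrative sanity check (not in the original file): the result must agree
    # with Python's built-in sort.
    assert odd_even_transposition(list(range(10, 0, -1))) == sorted(range(10, 0, -1))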
| 679 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
__lowerCamelCase : int = PegasusTokenizer
__lowerCamelCase : Union[str, Any] = PegasusTokenizerFast
__lowerCamelCase : Tuple = True
__lowerCamelCase : Dict = True
def _snake_case ( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase = PegasusTokenizer(_lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _snake_case ( self ) -> Optional[int]:
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def _snake_case ( self , **_lowerCAmelCase ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase ) -> str:
return ("This is a test", "This is a test")
def _snake_case ( self ) -> str:
_lowerCAmelCase = "</s>"
_lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(_lowerCAmelCase ) , 1103 )
def _snake_case ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
_lowerCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
_lowerCAmelCase = py_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_lowerCAmelCase = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
_lowerCAmelCase = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
_lowerCAmelCase = tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> int:
_lowerCAmelCase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_lowerCAmelCase = "To ensure a smooth flow of bank resolutions."
_lowerCAmelCase = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
_lowerCAmelCase = tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = ["This is going to be way too long." * 150, "short example"]
_lowerCAmelCase = ["not super long but more than 5 tokens", "tiny"]
_lowerCAmelCase = self._large_tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" )
_lowerCAmelCase = self._large_tokenizer(
text_target=_lowerCAmelCase , max_length=5 , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_lowerCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def _snake_case ( self ) -> Dict:
# fmt: off
_lowerCAmelCase = {"input_ids": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
__lowerCamelCase : Optional[Any] = PegasusTokenizer
__lowerCamelCase : str = PegasusTokenizerFast
__lowerCamelCase : List[Any] = True
__lowerCamelCase : int = True
def _snake_case ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase = PegasusTokenizer(_lowerCAmelCase , offset=0 , mask_token_sent=_lowerCAmelCase , mask_token="[MASK]" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _snake_case ( self ) -> List[Any]:
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def _snake_case ( self , **_lowerCAmelCase ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase ) -> Dict:
return ("This is a test", "This is a test")
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_lowerCAmelCase = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
_lowerCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
_lowerCAmelCase = py_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
@require_torch
def _snake_case ( self ) -> str:
_lowerCAmelCase = ["This is going to be way too long." * 1000, "short example"]
_lowerCAmelCase = ["not super long but more than 5 tokens", "tiny"]
_lowerCAmelCase = self._large_tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" )
_lowerCAmelCase = self._large_tokenizer(
text_target=_lowerCAmelCase , max_length=5 , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_lowerCAmelCase ) == 2 # input_ids, attention_mask.
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
_lowerCAmelCase = self._large_tokenizer(_lowerCAmelCase ).input_ids
self.assertListEqual(
_lowerCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 18 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class lowerCAmelCase_ ( OnnxSeqaSeqConfigWithPast ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase = {0: "batch"}
_lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
else:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super().outputs
else:
_lowerCAmelCase = super(_lowerCAmelCase , self ).outputs
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Generate decoder inputs
_lowerCAmelCase = seq_length if not self.use_past else 1
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase = dict(**_lowerCAmelCase , **_lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
_lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1]
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = decoder_seq_length + 3
_lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers
_lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
) )
# TODO: test this.
_lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_lowerCAmelCase , _lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCAmelCase = seqlen + 2
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = common_inputs["attention_mask"].dtype
_lowerCAmelCase = torch.cat(
[common_inputs["attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase )
]
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase )
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
elif self.task == "causal-lm":
_lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
else:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCAmelCase = super(_lowerCAmelCase , self )._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
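# Shape cheat-sheet for the dummy `past_key_values` assembled above (illustrative
# summary, derived from the code itself): each cached key/value tensor has shape
#     (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads)
# where past_sequence_length is decoder_seq_length + 3 in the seq2seq branch and
# seqlen + 2 in the causal-lm branch, matching the attention-mask padding above.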
| 18 | 1 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    '''simple docstring'''
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    '''simple docstring'''
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    '''simple docstring'''
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
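# rotate() above applies the standard 2D rotation matrix
#     R(theta) = ((cos(theta), -sin(theta)),
#                 (sin(theta),  cos(theta)))
# so, for example, rotate(numpy.array([1, 0]), 90) is approximately [0, 1].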
def plot(vectors: list[numpy.ndarray]) -> None:
    '''simple docstring'''
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 716 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase ( unittest.TestCase ):
def __UpperCAmelCase ( self : int) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __UpperCAmelCase ( self : Dict) -> Any:
lowercase_ , lowercase_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=__lowerCAmelCase , dtype=jnp.bfloataa)
lowercase_ , lowercase_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=__lowerCAmelCase , from_pt=__lowerCAmelCase , dtype=jnp.bfloataa)
lowercase_ = controlnet_params
lowercase_ = "bird"
lowercase_ = jax.device_count()
lowercase_ = pipe.prepare_text_inputs([prompts] * num_samples)
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
lowercase_ = pipe.prepare_image_inputs([canny_image] * num_samples)
lowercase_ = jax.random.PRNGKey(0)
lowercase_ = jax.random.split(__lowerCAmelCase , jax.device_count())
lowercase_ = replicate(__lowerCAmelCase)
lowercase_ = shard(__lowerCAmelCase)
lowercase_ = shard(__lowerCAmelCase)
lowercase_ = pipe(
prompt_ids=__lowerCAmelCase , image=__lowerCAmelCase , params=__lowerCAmelCase , prng_seed=__lowerCAmelCase , num_inference_steps=50 , jit=__lowerCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowercase_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
lowercase_ = images[0, 253:256, 253:256, -1]
lowercase_ = jnp.asarray(jax.device_get(image_slice.flatten()))
lowercase_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(F'output_slice: {output_slice}')
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def __UpperCAmelCase ( self : Tuple) -> List[str]:
lowercase_ , lowercase_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=__lowerCAmelCase , dtype=jnp.bfloataa)
lowercase_ , lowercase_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=__lowerCAmelCase , from_pt=__lowerCAmelCase , dtype=jnp.bfloataa)
lowercase_ = controlnet_params
lowercase_ = "Chef in the kitchen"
lowercase_ = jax.device_count()
lowercase_ = pipe.prepare_text_inputs([prompts] * num_samples)
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
lowercase_ = pipe.prepare_image_inputs([pose_image] * num_samples)
lowercase_ = jax.random.PRNGKey(0)
lowercase_ = jax.random.split(__lowerCAmelCase , jax.device_count())
lowercase_ = replicate(__lowerCAmelCase)
lowercase_ = shard(__lowerCAmelCase)
lowercase_ = shard(__lowerCAmelCase)
lowercase_ = pipe(
prompt_ids=__lowerCAmelCase , image=__lowerCAmelCase , params=__lowerCAmelCase , prng_seed=__lowerCAmelCase , num_inference_steps=50 , jit=__lowerCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowercase_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
lowercase_ = images[0, 253:256, 253:256, -1]
lowercase_ = jnp.asarray(jax.device_get(image_slice.flatten()))
lowercase_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(F'output_slice: {output_slice}')
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 461 | 0 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class A__ ( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = 'EncodecFeatureExtractor'
    tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast')
    def __init__(self, feature_extractor, tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """simple docstring"""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
def _SCREAMING_SNAKE_CASE ( self: Tuple , *_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: Any) -> List[Any]:
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional = None) -> List[np.ndarray]:
"""simple docstring"""
__lowerCAmelCase : Tuple = to_numpy(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = audio_values.shape
if padding_mask is None:
return list(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = to_numpy(_SCREAMING_SNAKE_CASE)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__lowerCAmelCase : Union[str, Any] = seq_len - padding_mask.shape[-1]
__lowerCAmelCase : str = 1 - self.feature_extractor.padding_value
__lowerCAmelCase : Optional[Any] = np.pad(_SCREAMING_SNAKE_CASE , ((0, 0), (0, difference)) , "constant" , constant_values=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = audio_values.tolist()
for i in range(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[Any] = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__lowerCAmelCase : int = sliced_audio.reshape(_SCREAMING_SNAKE_CASE , -1)
return audio_values | 293 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 293 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(3_2, 6_4), class_embed_type="simple_projection", projection_class_embeddings_input_dim=3_2, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, projection_dim=3_2, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=7_7)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=1_6_0_0_0, upsample_initial_channel=1_6, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 2_5_6
        audio_slice = audio[:1_0]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
        assert np.abs(audio_slice - expected_slice).max() < 1E-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1E-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
            text_inputs = text_inputs["input_ids"].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(
                text_inputs, )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1E-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 2_5_6
        audio_slice = audio[:1_0]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
        assert np.abs(audio_slice - expected_slice).max() < 1E-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = "A hammer hitting a wooden surface"
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 2_5_6)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 2_5_6)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ["hey"]
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 2_5_6)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 2_5_6)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 1_2_8, 1_6))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 2_5
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 8_1_9_2_0
        audio_slice = audio[7_7_2_3_0:7_7_2_4_0]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1E-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 8_1_9_2_0
        audio_slice = audio[2_7_7_8_0:2_7_7_9_0]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3E-2
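# Added note (hedged, not from the original tests): the waveform-length
# assertions above follow from len(audio) == audio_length_in_s * sampling_rate.
# With the dummy vocoder's 16_000 Hz sampling rate, 0.016 s yields 256 samples:
assert int(0.016 * 1_6_0_0_0) == 2_5_6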
| 709 |
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"
    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, **kwargs, )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor, original_sizes, input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, return_tensors=return_tensors, )
        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt", ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)
        if input_labels is not None:
            input_labels = np.array(input_labels)
        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})
        return encoding_image_processor
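    # Added note (hedged): the `unsqueeze(1)` / `expand_dims(..., 1)` calls above
    # insert a per-image batch axis of 1 whenever the caller supplied unbatched
    # prompts, so downstream SAM code always sees a fixed rank (3 for boxes and
    # labels, 4 for points).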
    def _pad_points_and_labels(self, input_points, input_labels):
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0)
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False) -> np.ndarray:
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None, ):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))
    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
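# Added usage sketch (illustrative values, not from the original file): the
# rescaling in `_normalize_coordinates` maps prompts from the original image
# frame into the resized `longest_edge` frame. For a 600x800 image resized so
# the longest edge becomes 1024, a point (x=400, y=300) lands at (512, 384):
assert (400 * 1024) / 800 == 512.0 and (300 * 1024) / 800 == 384.0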
| 213 | 0 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def b2mb(x):
    return int(x / 2**20)
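# e.g. b2mb(3 * 2**20) == 3 -- converts a raw byte count to whole MiB
assert b2mb(3 * 2**20) == 3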
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160, ):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        'glue', 'mrpc', split={'train': f"train[:{n_train}]", 'validation': f"validation[:{n_val}]"})
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(b2mb(tracemalloc.begin)))
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used))
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked))
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)))
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'peak_memory_utilization.json'), 'w') as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--peak_memory_upper_bound', type=float, default=None, help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.', )
    parser.add_argument(
        '--n_train', type=int, default=320, help='Number of training examples to use.', )
    parser.add_argument(
        '--n_val', type=int, default=160, help='Number of validation examples to use.', )
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
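# Added usage sketch (hedged, not part of the original script): TorchTracemalloc
# can wrap any CUDA workload to report its peak memory delta, e.g.:
#
#   with TorchTracemalloc() as tracemalloc:
#       loss = model(**batch).loss
#       loss.backward()
#   print(f"peak GPU memory delta: {tracemalloc.peaked} MB")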
| 171 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of `n` in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
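# Example (illustrative): 360 = 2**3 * 3**2 * 5
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]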
| 171 | 1 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement tanh via its logistic form: tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
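# Sanity check (illustrative): the logistic form above matches numpy's tanh.
assert np.allclose(tangent_hyperbolic(np.array([-1.0, 0.0, 1.0])), np.tanh(np.array([-1.0, 0.0, 1.0])))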
| 712 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place via repeated random swaps and return it."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
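# Added note (hedged): the shuffle relies on the module-level RNG; seed it for
# reproducible permutations, e.g.:
#
#   random.seed(0)
#   print(fisher_yates_shuffle(list(range(8))))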
| 590 | 0 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, 'IN_MEMORY_MAX_SIZE', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 329 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason='Bit does not output attentions')
    def test_attention_outputs(self):
        pass
    @unittest.skip(reason='Bit does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason='Bit does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=F"""Parameter {name} of model {model_class} seems not properly initialized""", )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=F"""Parameter {name} of model {model_class} seems not properly initialized""", )
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case: Tuple = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
snake_case: Optional[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
snake_case: Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case: Optional[int] = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case , snake_case: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case: Optional[int] = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case: Tuple = layer_type
snake_case: str = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case: Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    @unittest.skip(reason='Bit does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
| 329 | 1 |
import torch
from diffusers import StableDiffusionPipeline
A__ = """path-to-your-trained-model"""
A__ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
A__ = """A photo of sks dog in a bucket"""
A__ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
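# Added sketch (assumption, not part of the original snippet): pass a seeded
# generator for reproducible samples, e.g.:
#
#   generator = torch.Generator("cuda").manual_seed(0)
#   image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]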
| 49 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
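# Illustrative sketch (not part of the conversion script): the fused timm QKV
# weight has shape (3 * hidden_size, hidden_size), so three consecutive
# hidden_size-row blocks recover query, key and value. `hidden_size` below is
# an arbitrary toy value, not a real config value.
def _demo_qkv_split():
    hidden_size = 4
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = qkv[:hidden_size, :]
    key = qkv[hidden_size : hidden_size * 2, :]
    value = qkv[-hidden_size:, :]
    # concatenating the three blocks reproduces the original fused matrix
    assert torch.equal(torch.cat([query, key, value], dim=0), qkv)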
def remove_classification_head_(state_dict):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None)
def rename_key(dct , old , new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path , push_to_hub=False):
    backbone_config = BitConfig(
        global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=3_84 , num_labels=10_00)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config , base_model)
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest)
    read_in_q_k_v(state_dict , config , base_model)
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''') , '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image , return_tensors='''pt''').pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print('''Predicted class:''' , logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3)
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(F"ybelkada/{vit_name}")
        processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
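# Hedged CLI sketch (the script filename and the output path below are
# placeholders, not taken from this file):
#     python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#         --pytorch_dump_folder_path ./vit-hybrid-out --push_to_hub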
| 49 | 1 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs ) -> np.ndarray:
    """simple docstring"""
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax(_outputs ) -> np.ndarray:
    """simple docstring"""
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
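# Illustration of why `softmax` subtracts the row max first (the logits below
# are made-up values): np.exp(1000.0) overflows to inf, but after shifting the
# largest entry to 0 the normalized result is computed safely.
#     softmax(np.array([[1000.0, 1001.0]]))  # -> approx. [[0.2689, 0.7311]]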
class ClassificationFunction(ExplicitEnum ):
    """simple docstring"""
    SIGMOID = """sigmoid"""
    SOFTMAX = """softmax"""
    NONE = """none"""
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline ):
    """simple docstring"""
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__( self , **kwargs )-> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , return_all_scores=None , function_to_apply=None , top_k="" , **tokenizer_kwargs )-> Optional[Any]:
        """simple docstring"""
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k , int ) or top_k is None:
            postprocess_params["""top_k"""] = top_k
            postprocess_params["""_legacy"""] = False
        elif return_all_scores is not None:
            warnings.warn(
                """`return_all_scores` is now deprecated, if you want a similar functionality use `top_k=None` instead of"""
                """ `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , UserWarning , )
            if return_all_scores:
                postprocess_params["""top_k"""] = None
            else:
                postprocess_params["""top_k"""] = 1
        if isinstance(function_to_apply , str ):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["""function_to_apply"""] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self , *args , **kwargs )-> Any:
        """simple docstring"""
        result = super().__call__(*args , **kwargs )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = """top_k""" not in kwargs
        if isinstance(args[0] , str ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess( self , inputs , **tokenizer_kwargs )-> Dict[str, GenericTensor]:
        """simple docstring"""
        return_tensors = self.framework
        if isinstance(inputs , dict ):
            return self.tokenizer(**inputs , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ) and len(inputs ) == 1 and isinstance(inputs[0] , list ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                """The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
                """ dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
        return self.tokenizer(inputs , return_tensors=return_tensors , **tokenizer_kwargs )
    def _forward( self , model_inputs )-> Any:
        """simple docstring"""
        return self.model(**model_inputs )
    def postprocess( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True )-> Any:
        """simple docstring"""
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["""logits"""][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"""label""": self.model.config.id2label[i], """score""": score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
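# Hedged usage sketch (the checkpoint name is an assumption for illustration,
# not something this file prescribes):
#     from transformers import pipeline
#     classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     classifier("This is great!")  # e.g. [{'label': 'POSITIVE', 'score': 0.99...}]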
| 470 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin ):
    """simple docstring"""
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """OwlViTImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs )-> None:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , query_images=None , padding="max_length" , return_tensors="np" , **kwargs )-> BatchEncoding:
        """simple docstring"""
        if text is None and query_images is None and images is None:
            raise ValueError(
                """You have to specify at least one text or query image or image. All three cannot be none.""" )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [""" """] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("""Target return tensor type could not be returned""" )
            encoding = BatchEncoding()
            encoding["""input_ids"""] = input_ids
            encoding["""attention_mask"""] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["""query_pixel_values"""] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs )-> Any:
        """simple docstring"""
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs )-> Any:
        """simple docstring"""
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs )-> Any:
        """simple docstring"""
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs )-> Any:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs )-> Any:
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self )-> str:
        """simple docstring"""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self )-> Any:
        """simple docstring"""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
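# Hedged usage sketch (checkpoint name, image and query text are illustrative):
#     from transformers import OwlViTProcessor
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
#     # inputs now holds input_ids, attention_mask and pixel_values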
| 470 | 1 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys , default ) -> int:
    '''simple docstring'''
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key , default=False ) -> bool:
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key , default="no" ) -> str:
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return value
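# Minimal usage sketch for the helpers above (env var names are illustrative):
#     os.environ["MY_WORKERS"] = "4"
#     get_int_from_env(["MY_WORKERS"], default=1)     # -> 4
#     os.environ["MY_FLAG"] = "true"
#     parse_flag_from_env("MY_FLAG")                  # -> True
#     parse_choice_from_env("MY_MODE", default="no")  # -> "no" unless MY_MODE is set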
| 223 |
'''simple docstring'''
def euclidean_gcd(a , b ) -> int:
    '''simple docstring'''
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive(a , b ) -> int:
    '''simple docstring'''
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
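# Worked trace of the Euclidean algorithm above (values are just an example):
#     euclidean_gcd(48, 18): 48 % 18 = 12 -> 18 % 12 = 6 -> 12 % 6 = 0, so gcd = 6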
def main() -> None:
'''simple docstring'''
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 223 | 1 |
'''simple docstring'''
def binomial_coefficient(n: int, r: int ) -> int:
    """simple docstring"""
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1 ):
        # to compute current row from previous row.
        j = min(i, r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
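# Sketch of the rolling Pascal-row idea above (n=4, r=2 as a toy example):
# c starts as [1, 0, 0]; after each outer step it holds C(i, 0..r):
#     i=1 -> [1, 1, 0]; i=2 -> [1, 2, 1]; i=3 -> [1, 3, 3]; i=4 -> [1, 4, 6]
# so binomial_coefficient(4, 2) returns c[2] == 6.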
print(binomial_coefficient(n=10, r=5))
| 448 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class FocalNetConfig( BackboneConfigMixin, PretrainedConfig ):
    """simple docstring"""
    model_type ='focalnet'
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[192, 384, 768, 768] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1e-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
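# Hedged usage sketch: with the defaults above (four stages derived from
# `depths`), the stage names follow directly.
#     config = FocalNetConfig()
#     config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']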
| 658 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("""covid_data""", """cases deaths recovered""")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/" ):
    xpath_str = '''//div[@class = "maincounter-number"]/span/text()'''
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
a_ = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 706 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class ):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
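# Quick illustration of `_re_checkpoint` (the docstring line is a made-up example):
#     _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#     # -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]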
| 622 | 0 |
'''simple docstring'''
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict ) -> None:
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k ,None )
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict ) -> dict:
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k ,v )
        print(F'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb(emb ) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size ,emb_size ,bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
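# Sketch of the weight tying performed above (toy sizes, illustrative only):
# the returned Linear shares the embedding's storage, so the output projection
# and the decoder token embedding stay in sync rather than holding a copy.
def _demo_weight_tying():
    emb = nn.Embedding(10 , 4 )
    lin = make_linear_from_emb(emb )
    # same underlying storage, i.e. truly tied, not copied
    assert lin.weight.data_ptr() == emb.weight.data_ptr()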
def _download(url: str ,root: str ) -> bytes:
    os.makedirs(root ,exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split('/' )[-2]
    download_target = os.path.join(root ,filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(F'{download_target} exists and is not a regular file' )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target ,'rb' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' )
    with urllib.request.urlopen(url ) as source, open(download_target ,'wb' ) as output:
        with tqdm(
            total=int(source.info().get('Content-Length' ) ) ,ncols=80 ,unit='iB' ,unit_scale=True ,unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target ,'rb' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
    return model_bytes
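# Note on the integrity check above: the OpenAI checkpoint URLs in _MODELS embed
# the expected SHA-256 digest as the second-to-last path segment, e.g.
# .../whisper/models/<sha256>/tiny.pt, which is why `url.split('/')[-2]`
# recovers the checksum to compare against.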
def convert_openai_whisper_to_tfms(checkpoint_path ,pytorch_dump_folder_path ) -> None:
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path ,map_location='cpu' )
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['n_vocab'] ,encoder_ffn_dim=ffn_dim ,decoder_ffn_dim=ffn_dim ,num_mel_bins=dimensions['n_mels'] ,d_model=dimensions['n_audio_state'] ,max_target_positions=dimensions['n_text_ctx'] ,encoder_layers=dimensions['n_audio_layer'] ,encoder_attention_heads=dimensions['n_audio_head'] ,decoder_layers=dimensions['n_text_layer'] ,decoder_attention_heads=dimensions['n_text_state'] ,max_source_positions=dimensions['n_audio_ctx'] ,)
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict ,strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            F' but all the following weights are missing {missing}' )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
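# Hedged CLI sketch (the script filename and output path are placeholders):
#     python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny-hf
# A bare name such as "tiny" is resolved through _MODELS and downloaded first.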
| 366 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float('nan')
class Tee :
    '''simple docstring'''
    # ties sys.stdout and a report file together so everything printed also
    # lands in the log
    def __init__( self , filename ) -> None:
        '''simple docstring'''
        self.stdout = sys.stdout
        self.file = open(filename , 'a' )
    def __getattr__( self , attr ) -> Any:
        '''simple docstring'''
        return getattr(self.stdout , attr )
    def write( self , msg ) -> None:
        '''simple docstring'''
        self.stdout.write(msg )
        # strip tqdm codes
        self.file.write(re.sub(R'^.*\r' , '' , msg , 0 , re.M ) )
def get_original_command( max_width=8_0 , full_python_path=False ) -> str:
    """simple docstring"""
    cmd = []
    # deal with critical env vars
    env_keys = ['CUDA_VISIBLE_DEVICES']
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(f'''{key}={val}''' )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('/' )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''
    while len(cmd ) > 0:
        current_line += f'''{cmd.pop(0 )} '''
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ''
    return "\\\n".join(lines )
def get_base_command( args , output_dir ) -> list:
    """simple docstring"""
    # unwrap multi-line input
    args.base_cmd = re.sub(R'[\\\n]+' , ' ' , args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd )
    args.base_cmd += f''' --output_dir {output_dir}'''
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single( id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ) -> dict:
    """simple docstring"""
    # enable this branch to debug the reporting w/o actually running the benchmark
    if 0:
        import random
        from time import sleep
        sleep(0 )
        return dict(
            {k: random.uniform(0 , 1_0_0 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222] )} , )
    result = subprocess.run(cmd , capture_output=True , text=True )
    if verbose:
        print('STDOUT' , result.stdout )
        print('STDERR' , result.stderr )
    # save the streams
    prefix = variation.replace(' ' , '-' )
    with open(Path(output_dir ) / f'''log.{prefix}.stdout.txt''' , 'w' ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / f'''log.{prefix}.stderr.txt''' , 'w' ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print('failed' )
        return {target_metric_key: nan}
    with io.open(f'''{output_dir}/all_results.json''' , 'r' , encoding='utf-8' ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run( id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ) -> dict:
    """simple docstring"""
    results = []
    metrics = []
    preamble = f'''{id}: {variation:<{longest_variation_len}}'''
    outcome = f'''{preamble}: '''
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f'''\33[2K\r{outcome}'''
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = f'''{outcome} {mean_target}'''
        if len(results ) > 1:
            results_str += f''' {tuple(round(x , 2 ) for x in results )}'''
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions() -> str:
    """simple docstring"""
    properties = torch.cuda.get_device_properties(torch.device('cuda' ) )
return f'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**3_0:0.2f}GB
'''
def process_results( results , target_metric_key , report_metric_keys , base_variation , output_dir ) -> None:
    """simple docstring"""
    df = pd.DataFrame(results )
    variation_key = 'variation'
    diff_key = 'diff_%'
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r : round(1_0_0 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='columns' , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis='columns' )  # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis='columns' )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace('_' , '<br>' ) , axis='columns' )
    df_console = df.rename(lambda c : c.replace('_' , '\n' ) , axis='columns' )
    report = ['', 'Copy between the cut-here-lines and paste as is to github or a forum']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt='.2f' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt='.2f' )]
    print('\n\n'.join(report ) )
def main() -> None:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base-cmd' , default=None , type=str , required=True , help='Base cmd' , )
    parser.add_argument(
        '--variations' , default=None , type=str , nargs='+' , required=True , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , )
    parser.add_argument(
        '--base-variation' , default=None , type=str , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , )
    parser.add_argument(
        '--target-metric-key' , default=None , type=str , required=True , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , )
    parser.add_argument(
        '--report-metric-keys' , default='' , type=str , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples\'' , )
    parser.add_argument(
        '--repeat-times' , default=1 , type=int , help='How many times to re-run each variation - an average will be reported' , )
    parser.add_argument(
        '--output_dir' , default='output_benchmark' , type=str , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , )
    parser.add_argument(
        '--verbose' , default=False , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(R'\|' , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(' '.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = f'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
    print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
    print(f'''and this script\'s output is also piped into {report_fn}''' )
    sys.stdout = Tee(report_fn )
    print(f'''\n*** Running {len(variations )} benchmarks:''' )
    print(f'''Base command: {" ".join(base_cmd )}''' )
    variation_key = 'variation'
    results = []
    for id, variation in enumerate(tqdm(variations , desc='Total completion: ' , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
| 246 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ) -> None:
        token = '''<s>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(vocab_keys ) , 1054 )
    def test_vocab_size( self ) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
    def test_full_tokenizer( self ) -> None:
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
    def test_tokenizer_integration( self ) -> None:
# fmt: off
SCREAMING_SNAKE_CASE_ = {'''input_ids''': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
    def test_save_pretrained( self ) -> None:
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname_2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname_2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname_2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname_2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname_2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname_2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest(unittest.TestCase):
    """simple docstring"""

    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_XX"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
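# Hedged usage sketch (added for illustration, not part of the test file): the checkpoint
# exercised above can be used for translation roughly like this, following the public API.
#   tok = MBartaaTokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")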
| 597 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For each vector in value_array, find the closest vector in dataset (by Euclidean distance)."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
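    # Hedged usage sketch (added for illustration; the data below is hypothetical):
    # nearest neighbour of one 2-D query vector in a three-vector dataset.
    demo_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    demo_queries = np.array([[0.9, 1.1]])
    # Expected result: [[[1.0, 1.0], 0.1414...]] -- [1.0, 1.0] is the closest vector.
    print(similarity_search(demo_dataset, demo_queries))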
| 501 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    """simple docstring"""

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, special_strings
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ ,{"TESTING_MOCKED_DATALOADERS": "1"} )
class FeatureExamplesTests(TempDirTestCase):
    clear_on_exit = False  # attribute name assumed from accelerate's TempDirTestCase

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))
    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))
    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)
self.assertIn('''epoch 1:''' , UpperCamelCase_ )
    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
@slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)
    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))
    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
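# Hedged note (added for illustration): each test above shells out to the equivalent of
#   accelerate launch --config_file <tmpdir>/default_config.yml \
#       examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir <tmpdir>
# so any example can also be reproduced manually with the same command line.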
| 501 | 1 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
"""jukebox""": 5_1_2,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2 the character vocabulary has 80 entries; v3 dropped "+", so it has 79.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(R"\-'", R"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return {
            "artists_encoder": self.artists_encoder,
            "genres_encoder": self.genres_encoder,
            "lyrics_encoder": self.lyrics_encoder,
        }
    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Converts the artist, genre and lyrics tokens to their indices using the vocabularies."""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Converts the lyrics string into a sequence of single-character tokens."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(
        self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False
    ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(R"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Normalizes the input text: lowercase, keep only accepted characters, collapse underscores."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(R"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)
    def convert_to_tensors(
        self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="" , SCREAMING_SNAKE_CASE="pt" ) -> BatchEncoding:
"""simple docstring"""
UpperCamelCase = [0, 0, 0]
UpperCamelCase = [artist] * len(self.version )
UpperCamelCase = [genres] * len(self.version )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.tokenize(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase , UpperCamelCase = self._convert_token_to_id(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase = [-INFINITY] * len(full_tokens[-1] )
UpperCamelCase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=SCREAMING_SNAKE_CASE )
for i in range(len(self.version ) )
]
return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Saves the tokenizer's three vocabulary dictionaries to `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """Converts indices back to artist, genre and lyric tokens using the decoders."""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
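# Hedged usage sketch (added for illustration; the repo id comes from
# PRETRAINED_VOCAB_FILES_MAP above, the artist/genre/lyrics values are hypothetical):
#   tokenizer = JukeboxTokenizer.from_pretrained("ArthurZ/jukebox")
#   encoding = tokenizer("Alan Jackson", "Country Rock", lyrics="old town road")
#   encoding["input_ids"]  # one tensor per prior, i.e. len(tokenizer.version) == 3 entries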
| 414 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __magic_name__ ( lowercase_ = "isbn/0140328726" ) -> dict:
'''simple docstring'''
UpperCamelCase = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
UpperCamelCase = f'''{olid} is not a valid Open Library olid'''
raise ValueError(lowercase_ )
return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json()
def __magic_name__ ( lowercase_ ) -> dict:
'''simple docstring'''
UpperCamelCase = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
UpperCamelCase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
UpperCamelCase = [
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
UpperCamelCase = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(lowercase_ , lowercase_ ):
UpperCamelCase = ", ".join(lowercase_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(F'\nSearching Open Library for ISBN: {isbn}...\n')
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print("""\n""".join(F'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'Sorry, there are no results for ISBN: {isbn}.')
| 414 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Load a ParlAI Blenderbot checkpoint and re-save it in the HF BlenderbotForConditionalGeneration format."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
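# Example invocation (hedged; the script name and paths are hypothetical, the flags come
# from the argparse definition above):
#   python convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py \
#       --src_path ./blenderbot-model.bin --save_dir ./hf_blenderbot \
#       --hf_config_json ./blenderbot-3b-config.json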
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
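    # Hedged usage note (added for illustration): with the lazy structure below installed,
    # downstream code can simply do
    #   from transformers.models.bigbird_pegasus import BigBirdPegasusForConditionalGeneration
    # and the torch-backed module is only imported on first attribute access.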
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 16 | 0 |
"""simple docstring"""
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of `num`."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the `max_n`-th convergent of the continued fraction for e."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
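# Worked check (from the Project Euler 65 statement): the 10th convergent of e is
# 1457/536, so solution(10) == 1 + 4 + 5 + 7 == 17.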
if __name__ == "__main__":
print(F'''{solution() = }''')
| 713 |
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCamelCase = "."
if __name__ == "__main__":
__lowerCamelCase = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
__lowerCamelCase = []
__lowerCamelCase = []
with open(doctest_file_path) as fp:
for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__lowerCamelCase = "\n".join(non_existent_paths)
raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 536 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,  # parameter name assumed for this positional default
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
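# Hedged usage sketch (added for illustration):
#   config = DeformableDetrConfig()  # defaults: d_model=256, 300 queries, timm ResNet-50 backbone
#   two_stage = DeformableDetrConfig(two_stage=True, with_box_refine=True)  # two_stage needs box refine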
| 325 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    # flag names below assumed from the transformers common test mixin
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def A__ ( self ) -> str:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase__ )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.41_80, -1.50_51, -3.48_36]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
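# Hedged note (added for illustration): the integration test above is gated behind @slow;
# per the usual transformers convention it runs with something like
#   RUN_SLOW=1 pytest tests/models/regnet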
| 325 | 1 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Return the (HF key, original key) pairs for the patch-embedding weights of stage `idx`."""
    embed = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention(idx, cnt):
    """Return the (HF key, original key) pairs for attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token(idx):
    """Return the (HF key, original key) pair for the cls token of the final stage."""
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
return token
def final():
    """Return the (HF key, original key) pairs for the final layernorm and classifier head."""
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Load the original CvT weights and convert/save them in the HuggingFace format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
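# Example invocation (hedged; the script name and checkpoint path are hypothetical, the
# flags come from the argparse definition above):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name "cvtmodels/CvT-w24-384x384-IN-22k.pth" \
#       --pytorch_dump_folder_path ./cvt-w24-hf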
| 60 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Convert a 32-char bit-string from big-endian to little-endian byte order."""
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as the little-endian hex string of its low 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def __snake_case ( _UpperCAmelCase ):
__a = b''''''
for char in message:
bit_string += format(_UpperCAmelCase , '''08b''' ).encode('''utf-8''' )
__a = format(len(_UpperCAmelCase ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_UpperCAmelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def __snake_case ( _UpperCAmelCase ):
if len(_UpperCAmelCase ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(_UpperCAmelCase ) , 512 ):
__a = bit_string[pos : pos + 512]
__a = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def __snake_case ( _UpperCAmelCase ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
__a = format(_UpperCAmelCase , '''032b''' )
__a = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_UpperCAmelCase , 2 )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return (a + b) % 2**32
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message):
    """Compute the MD5 digest of the given bytes message as a hex byte string."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    # Per-step left-rotation amounts: 16 entries per round, 4 rounds
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
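# A quick sanity check for the implementation above; the expected value is the
# well-known MD5 digest of b"hello":
#
#   >>> md5_me(b"hello")
#   b'5d41402abc4b2a76b9719d911017c592'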
| 60 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a pytorch_model.bin or model.pt file to torch.float16 for faster downloads, less disk space."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
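# Example invocation via python-fire (the script and file names are illustrative):
#
#   python convert_model_to_fp16.py ./pytorch_model.bin --save_path ./pytorch_model_fp16.bin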
if __name__ == "__main__":
fire.Fire(convert) | 691 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DetaImageProcessor, assuming do_resize is set
        to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),)

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}

        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = DetaImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 67 | 0 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """
    Constructs an ESM tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
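# A minimal usage sketch (the vocab path is illustrative; the file should contain one
# token per line, e.g. the standard ESM-2 vocabulary):
#
#   tokenizer = EsmTokenizer(vocab_file="./vocab.txt")
#   encoding = tokenizer("MKTVRQ")
#   print(encoding["input_ids"])  # <cls> ... <eos> ids wrap the residue tokens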
| 258 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
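# A short usage sketch (shapes are illustrative): project hidden features to Student-T
# parameters, then build a distribution rescaled to the data range.
#
#   output = StudentTOutput(dim=1)
#   proj = output.get_parameter_projection(in_features=32)
#   distr_args = proj(torch.randn(8, 32))  # (df, loc, scale), each of shape (8,)
#   distr = output.distribution(distr_args, loc=torch.zeros(8), scale=torch.ones(8))
#   sample = distr.sample()  # shape (8,)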
| 258 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
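# A small illustration of the wrapper above (sizes are arbitrary): the wrapped linear
# layer keeps its frozen weights while the low-rank path provides trainable parameters.
#
#   base = nn.Linear(16, 16)
#   wrapped = LoRALayer(base, rank=4)
#   out = wrapped(torch.randn(2, 16))  # base output + adapter output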
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
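# For reference, the core pattern these tests exercise (a sketch; requires a CUDA GPU
# with bitsandbytes and accelerate installed):
#
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-1b7", load_in_4bit=True, device_map="auto"
#   )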
| 258 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
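# A minimal usage sketch (downloads the hosted vocabulary and tokenizer files on first use):
#
#   tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   enc = tokenizer("a picture of a cat", "is there a cat?")
#   # token_type_ids mark the sequence pair built by the methods above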
| 258 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix='.wav')
        sf.write(path, tensor, 16_000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png'
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png'
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = 'Hey!'
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 109 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')


class LRUCache(Generic[T]):
    """
    Page Replacement Algorithm, Least Recently Used (LRU) Caching.
    """

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Creates an empty store and map for the keys.
        The LRUCache is set to the size n.
        """
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """
        Looks for a page in the cache store and adds a reference to the set.
        Removes the least recently used key if the store is full.
        Updates the store to reflect recent access.
        """
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """
        Prints all the elements in the store.
        """
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 109 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 387 |
def _print_dist(dist, v):
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()


def floyd_warshall(graph, v):
    """
    Computes all-pairs shortest paths, prints the distance matrix, and returns it
    together with the number of vertices.
    """
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 27 | 0 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
    print("""Googling.....""")
    url = f'''https://www.google.com/search?q={query}&num=100'''
    res = requests.get(
        url,
        headers={"""User-Agent""": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """yuRUbf"""})
            .find("""a""")
            .get("""href""")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """kCrYT"""})
            .find("""a""")
            .get("""href""")
        )["""url"""][0]
    webbrowser.open(link)
| 713 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__(
        self,
        vocab_size=5_0257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                F'but is `len(config.attention_layers) = {len(self.attention_layers)}`, '
                F'`config.num_layers = {self.num_layers}`. '
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.')

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX, as
    the original implementation uses Python variables and control flow.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
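# A quick illustration of custom_unfold (values are illustrative): unfolding a (1, 8)
# tensor along dimension 1 with size=4 and step=4 yields shape (1, 2, 4).
#
#   x = torch.arange(8).reshape(1, 8)
#   custom_unfold(x, dimension=1, size=4, step=4)  # tensor([[[0, 1, 2, 3], [4, 5, 6, 7]]])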
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
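# A minimal configuration sketch (mirrors the defaults above): the attention_types
# pattern expands to one entry per layer.
#
#   config = GPTNeoConfig(num_layers=24, attention_types=[[["global", "local"], 12]])
#   assert len(config.attention_layers) == 24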
| 427 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
'''simple docstring'''
def __init__(self : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str]=13 , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : Optional[int]=[10, 20, 30, 40] , _UpperCAmelCase : str=[2, 2, 3, 2] , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[str]=37 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Dict=10 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : int=["stage2", "stage3", "stage4"] , _UpperCAmelCase : List[Any]=[2, 3, 4] , _UpperCAmelCase : int=None , ) -> List[str]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = num_stages
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = num_labels
lowercase__ = initializer_range
lowercase__ = out_features
lowercase__ = out_indices
lowercase__ = scope
def lowerCamelCase__ (self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ (self : Any ) -> Dict:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = ConvNextModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase__ (self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = ConvNextForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ConvNextBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ = None
lowercase__ = ConvNextBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A__ = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
A__ = True
A__ = False
A__ = False
A__ = False
A__ = False
def lowerCamelCase__ (self : int ) -> Any:
"""simple docstring"""
lowercase__ = ConvNextModelTester(self )
lowercase__ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowerCamelCase__ (self : Optional[Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def lowerCamelCase__ (self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def lowerCamelCase__ (self : Optional[Any] ) -> Any:
"""simple docstring"""
pass
def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
def check_hidden_states_output(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ):
lowercase__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowerCamelCase__ (self : Tuple ) -> Optional[int]:
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ConvNextModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
lowercase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase__ (self : Any ) -> Dict:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ (self : Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_UpperCAmelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )
# verify the logits
lowercase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
lowercase__ = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@require_torch
class A ( unittest.TestCase , UpperCAmelCase__ ):
'''simple docstring'''
A__ = (ConvNextBackbone,) if is_torch_available() else ()
A__ = ConvNextConfig
A__ = False
def lowerCamelCase__ (self : Any ) -> List[Any]:
"""simple docstring"""
lowercase__ = ConvNextModelTester(self )
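
# Usage note (the file path is an assumption about where this suite lives in the
# repo): a single test from the suite above can be run in isolation with, e.g.
#     python -m pytest tests/models/convnext/test_modeling_convnext.py -k "backbone" -v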
| 15 |
def find_minimum_change(denominations, value) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
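
# Illustrative trace with the default Indian denominations (sorted ascending):
#     find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987)
#     # -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
# Note: the greedy scan above assumes `denominations` is sorted ascending and the
# coin system is canonical; arbitrary coin systems would need dynamic programming.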
| 662 | 0 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SCREAMING_SNAKE_CASE : Any = get_tests_dir('''fixtures/dummy-config.json''')
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = 0
def _UpperCAmelCase ( self ):
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(__a , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = AutoConfig.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = AutoConfig.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = AutoConfig.for_model('roberta' )
self.assertIsInstance(__a , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
A__ = os.path.join(__a , 'fake-roberta' )
os.makedirs(__a , exist_ok=__a )
with open(os.path.join(__a , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
A__ = AutoConfig.from_pretrained(__a )
self.assertEqual(type(__a ) , __a )
def _UpperCAmelCase ( self ):
"""simple docstring"""
try:
AutoConfig.register('custom' , __a )
# Wrong model type will raise an error
with self.assertRaises(__a ):
AutoConfig.register('model' , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoConfig.register('bert' , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a )
A__ = AutoConfig.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _UpperCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
__a , 'bert-base is not a local folder and is not a valid model identifier' ):
A__ = AutoConfig.from_pretrained('bert-base' )
def _UpperCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
__a , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
A__ = AutoConfig.from_pretrained(__a , revision='aaaaaa' )
def _UpperCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
__a , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
A__ = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def _UpperCAmelCase ( self ):
"""simple docstring"""
with self.assertRaises(__a ):
A__ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
A__ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=__a )
A__ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=__a )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a )
A__ = AutoConfig.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )
def _UpperCAmelCase ( self ):
"""simple docstring"""
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: Optional[Any] = """new-model"""
try:
AutoConfig.register('new-model' , __a )
# If remote code is not set, the default is to use local
A__ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
A__ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=__a )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
A__ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=__a )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
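
# Minimal sketch of the register/round-trip pattern the tests above exercise
# (hypothetical class name; `CustomConfig` in the test plays the same role):
#     class MyConfig(PretrainedConfig):
#         model_type = "my-model"
#
#     AutoConfig.register("my-model", MyConfig)
#     cfg = AutoConfig.for_model("my-model")
#     assert isinstance(cfg, MyConfig)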
| 554 |
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[Any] = '''ybelkada/fonts'''
def __lowerCamelCase ( ):
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
'Pix2StructImageProcessor. Please upgrade torch.' )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
requires_backends(lowerCAmelCase__ ,['torch'] )
_check_torch_version()
A__ = image_tensor.unsqueeze(0 )
A__ = torch.nn.functional.unfold(lowerCAmelCase__ ,(patch_height, patch_width) ,stride=(patch_height, patch_width) )
A__ = patches.reshape(image_tensor.size(0 ) ,image_tensor.size(1 ) ,lowerCAmelCase__ ,lowerCAmelCase__ ,-1 )
A__ = patches.permute(0 ,4 ,2 ,3 ,1 ).reshape(
image_tensor.size(2 ) // patch_height ,image_tensor.size(3 ) // patch_width ,image_tensor.size(1 ) * patch_height * patch_width ,)
return patches.unsqueeze(0 )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ = 36 ,lowerCAmelCase__ = "black" ,lowerCAmelCase__ = "white" ,lowerCAmelCase__ = 5 ,lowerCAmelCase__ = 5 ,lowerCAmelCase__ = 5 ,lowerCAmelCase__ = 5 ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,):
requires_backends(lowerCAmelCase__ ,'vision' )
# Add new lines so that each line is no more than 80 characters.
A__ = textwrap.TextWrapper(width=80 )
A__ = wrapper.wrap(text=lowerCAmelCase__ )
A__ = '\n'.join(lowerCAmelCase__ )
if font_bytes is not None and font_path is None:
A__ = io.BytesIO(lowerCAmelCase__ )
elif font_path is not None:
A__ = font_path
else:
A__ = hf_hub_download(lowerCAmelCase__ ,'Arial.TTF' )
A__ = ImageFont.truetype(lowerCAmelCase__ ,encoding='UTF-8' ,size=lowerCAmelCase__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
A__ = ImageDraw.Draw(Image.new('RGB' ,(1, 1) ,lowerCAmelCase__ ) )
A__ , A__ , A__ , A__ = temp_draw.textbbox((0, 0) ,lowerCAmelCase__ ,lowerCAmelCase__ )
# Create the actual image with a bit of padding around the text.
A__ = text_width + left_padding + right_padding
A__ = text_height + top_padding + bottom_padding
A__ = Image.new('RGB' ,(image_width, image_height) ,lowerCAmelCase__ )
A__ = ImageDraw.Draw(lowerCAmelCase__ )
draw.text(xy=(left_padding, top_padding) ,text=lowerCAmelCase__ ,fill=lowerCAmelCase__ ,font=lowerCAmelCase__ )
return image
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ):
requires_backends(lowerCAmelCase__ ,'vision' )
# Convert to PIL image if necessary
A__ = to_pil_image(lowerCAmelCase__ )
A__ = render_text(lowerCAmelCase__ ,**lowerCAmelCase__ )
A__ = max(header_image.width ,image.width )
A__ = int(image.height * (new_width / image.width) )
A__ = int(header_image.height * (new_width / header_image.width) )
A__ = Image.new('RGB' ,(new_width, new_height + new_header_height) ,'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) ,(0, 0) )
new_image.paste(image.resize((new_width, new_height) ) ,(0, new_header_height) )
# Convert back to the original framework if necessary
A__ = to_numpy_array(lowerCAmelCase__ )
if infer_channel_dimension_format(lowerCAmelCase__ ) == ChannelDimension.LAST:
A__ = to_channel_dimension_format(lowerCAmelCase__ ,ChannelDimension.LAST )
return new_image
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: Optional[int] = ["""flattened_patches"""]
def __init__( self , __a = True , __a = True , __a = None , __a = 2048 , __a = False , **__a , ):
"""simple docstring"""
super().__init__(**__a )
A__ = patch_size if patch_size is not None else {'height': 16, 'width': 16}
A__ = do_normalize
A__ = do_convert_rgb
A__ = max_patches
A__ = is_vqa
def _UpperCAmelCase ( self , __a , __a , __a , **__a ):
"""simple docstring"""
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
A__ = to_channel_dimension_format(__a , ChannelDimension.FIRST )
A__ = torch.from_numpy(__a )
A__ , A__ = patch_size['height'], patch_size['width']
A__ , A__ = get_image_size(__a )
# maximize scale s.t.
A__ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
A__ = max(min(math.floor(scale * image_height / patch_height ) , __a ) , 1 )
A__ = max(min(math.floor(scale * image_width / patch_width ) , __a ) , 1 )
A__ = max(num_feasible_rows * patch_height , 1 )
A__ = max(num_feasible_cols * patch_width , 1 )
A__ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=__a , antialias=__a , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
A__ = torch_extract_patches(__a , __a , __a )
A__ = patches.shape
A__ = patches_shape[1]
A__ = patches_shape[2]
A__ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
A__ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
A__ = torch.arange(__a ).reshape([rows, 1] ).repeat(1 , __a ).reshape([rows * columns, 1] )
A__ = torch.arange(__a ).reshape([1, columns] ).repeat(__a , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
A__ = row_ids.to(torch.floataa )
A__ = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
A__ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
A__ = torch.nn.functional.pad(__a , [0, 0, 0, max_patches - (rows * columns)] ).float()
A__ = to_numpy_array(__a )
return result
def _UpperCAmelCase ( self , __a , __a = None , **__a ):
"""simple docstring"""
if image.dtype == np.uinta:
A__ = image.astype(np.floataa )
# take mean across the whole `image`
A__ = np.mean(__a )
A__ = np.std(__a )
A__ = max(__a , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(__a , mean=__a , std=__a , **__a )
def _UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ):
"""simple docstring"""
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A__ = patch_size if patch_size is not None else self.patch_size
A__ = max_patches if max_patches is not None else self.max_patches
A__ = self.is_vqa
if kwargs.get('data_format' , __a ) is not None:
raise ValueError('data_format is not an accepted input as the outputs are ' )
A__ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A__ = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
A__ = [to_numpy_array(__a ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
A__ = kwargs.pop('font_bytes' , __a )
A__ = kwargs.pop('font_path' , __a )
if isinstance(__a , __a ):
A__ = [header_text] * len(__a )
A__ = [
render_header(__a , header_text[i] , font_bytes=__a , font_path=__a )
for i, image in enumerate(__a )
]
if do_normalize:
A__ = [self.normalize(image=__a ) for image in images]
# convert to torch tensor and permute
A__ = [
self.extract_flattened_patches(image=__a , max_patches=__a , patch_size=__a )
for image in images
]
# create attention mask in numpy
A__ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
A__ = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=__a )
return encoded_outputs
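
# Worked example of the resizing math above (illustrative): with max_patches=2048
# and a 16x16 patch on a 512x1024 image,
#     scale = sqrt(2048 * (16/512) * (16/1024)) = 1.0
# so num_feasible_rows = 32 and num_feasible_cols = 64, i.e. exactly
# 32 * 64 = 2048 patches and no padding rows in the final tensor.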
| 554 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
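
# Worked example (illustrative): compute_f1("the cat sat", "cat sat down")
# normalizes to ["cat", "sat"] vs ["cat", "sat", "down"], so num_same = 2,
# precision = 2/3, recall = 2/2 = 1.0 and F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.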
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
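
# Typical invocation (illustrative; the script filename is an assumption):
#     python evaluate-v2.0.py data/dev-v2.0.json preds.json \
#         --na-prob-file na_probs.json --out-file eval.json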
| 684 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowerCAmelCase = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""]
__SCREAMING_SNAKE_CASE : List[str] = BlenderbotTokenizer
def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ):
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**__UpperCamelCase )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = "post_processor"
_UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state["sep"] )
if "cls" in state:
_UpperCAmelCase = tuple(state["cls"] )
_UpperCAmelCase = False
if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) )
_UpperCAmelCase = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
_UpperCAmelCase = value
def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
_UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
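
# Hedged usage sketch (model id taken from this file's own config map):
#     tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#     ids = tok("Hello, how are you?").input_ids
# Per the override above, building inputs with special tokens appends a single
# EOS token and uses no BOS.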
| 684 | 1 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
for part_id in partition_order:
_UpperCAmelCase = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(lowercase ):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
_UpperCAmelCase = spark.range(1_00 ).repartition(1 )
_UpperCAmelCase = Spark(lowercase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
_UpperCAmelCase = spark.range(10 ).repartition(2 )
_UpperCAmelCase = [1, 0]
_UpperCAmelCase = _generate_iterable_examples(lowercase ,lowercase ) # Reverse the partitions.
_UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase ,lowercase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
_UpperCAmelCase , _UpperCAmelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
_UpperCAmelCase = spark.range(10 ).repartition(1 )
_UpperCAmelCase = SparkExamplesIterable(lowercase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowercase ):
assert row_id == f'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
_UpperCAmelCase = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
_UpperCAmelCase = lambda lowercase : x.reverse()
_UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase ,[2, 1, 0] )
_UpperCAmelCase = SparkExamplesIterable(lowercase ).shuffle_data_sources(lowercase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowercase ):
_UpperCAmelCase , _UpperCAmelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
_UpperCAmelCase = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
_UpperCAmelCase = SparkExamplesIterable(lowercase ).shard_data_sources(worker_id=0 ,num_workers=2 )
assert shard_it_a.n_shards == 2
_UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase ,[0, 2] )
for i, (row_id, row_dict) in enumerate(lowercase ):
_UpperCAmelCase , _UpperCAmelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
_UpperCAmelCase = SparkExamplesIterable(lowercase ).shard_data_sources(worker_id=1 ,num_workers=2 )
assert shard_it_a.n_shards == 2
_UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase ,[1, 3] )
for i, (row_id, row_dict) in enumerate(lowercase ):
_UpperCAmelCase , _UpperCAmelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
_UpperCAmelCase = spark.range(1_00 ).repartition(1 )
_UpperCAmelCase = Spark(lowercase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
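
# Contract illustrated by the tests above (derived from the assertions, not new
# behavior): _get_expected_row_ids_and_row_dicts_for_partition_order returns
# pairs ("<partition_id>_<row_idx>", row.asDict()), listing all rows of each
# partition in the order given by `partition_order`.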
| 275 | """simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
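
# Hypothetical subclass sketch (names assumed) showing the intended contract:
#
#     class EnvCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser):
#             env_parser = parser.add_parser("env")  # `parser` is a subparsers action here
#             env_parser.set_defaults(func=lambda args: EnvCommand())
#
#         def run(self):
#             print("environment info...")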
| 275 | 1 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
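
# Quick illustrations of the helpers above:
#     camelcase_to_snakecase("HelloWorld")   # -> "hello_world"
#     snakecase_to_camelcase("hello_world")  # -> "HelloWorld"
#     filenames_for_dataset_split(
#         "/data", "MyDataset", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
#     )
#     # -> ["/data/my_dataset-train-00000-of-00002.arrow",
#     #     "/data/my_dataset-train-00001-of-00002.arrow"]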
| 351 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
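
# Minimal usage sketch (mirrors the defaults above):
#     config = GLPNConfig()
#     assert config.decoder_hidden_size == 64
#     config.save_pretrained("./glpn-config")  # writes config.json via PretrainedConfig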
| 351 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=3_8_4,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=1_2_8,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=2_0,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=3_0,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=4_2, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
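# Note on the pattern above: the pinned (`cuda.pagelocked_empty`) host buffers allocated below plus
# the `*_async` copy calls let the H2D copies, kernel execution, and D2H copies all be queued on a
# single CUDA stream; `stream.synchronize()` is the only blocking point, so the measured
# `infer_time` covers the full device round trip.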
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
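# Illustrative sketch of the overflow behavior above (values are made up): with max_length=384 and
# stride=128, a single SQuAD example whose context tokenizes to ~600 tokens yields two features.
# `overflow_to_sample_mapping` would then look like [0, 0, 1, ...]: both features point back to
# example 0, which is exactly what `example_id` records for post-processing.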
__UpperCamelCase : Optional[Any] = raw_datasets["validation"]
# Validation Feature Creation
__UpperCamelCase : Optional[Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
__UpperCamelCase : List[Any] = default_data_collator
__UpperCamelCase : Any = eval_dataset.remove_columns(["example_id", "offset_mapping"])
__UpperCamelCase : Any = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
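# For reference, the SQuAD metrics consume exactly the two lists built above, e.g.
#   predictions = [{"id": "56be4db0acb8001400a502ec", "prediction_text": "Denver Broncos"}]
#   references  = [{"id": "56be4db0acb8001400a502ec", "answers": {"text": [...], "answer_start": [...]}}]
# (the ID shown here is illustrative).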
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    eval_time = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", eval_time, eval_time / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental | 227 | 0 |
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
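# Each call draws one triangle outline and then recurses into its three corner sub-triangles,
# so a run at depth d draws 3^0 + 3^1 + ... + 3^d = (3^(d + 1) - 1) / 2 triangles in total.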
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
    _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
    _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
    _register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, "
            f"but got '{format_type}'"
        )
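# Illustrative usage of the registry above (assuming the relevant backend is installed):
#
#     formatter = get_formatter("np")      # alias resolves to the registered NumpyFormatter
#     formatter = get_formatter("torch")   # raises the registered ValueError when torch is absent
#
# `format_table` / `query_table` imported above then use the returned formatter to materialize rows.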
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
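# Math note on the two schedule helpers above: with sigma = sin(t * pi / 2) ** 2 and
# alpha = (1 - sigma ** 2) ** 0.5 we have alpha ** 2 + sigma ** 2 == 1, so (alpha, sigma) is a
# point on the unit circle and t = atan2(sigma, alpha) / (pi / 2) recovers a diffusion time in
# [0, 1]; get_crash_schedule simply re-parameterizes a linear t grid through that circle.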
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
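# Shape sketch for the qkv split above: an original attention projection stored as a Conv1d
# weight of shape (3*C, C, 1) is cut into three (C, C) Linear weights (query / key / value),
# with the trailing kernel dimension of size 1 dropped via the `[..., :, 0]` index; biases of
# shape (3*C,) are split into three (C,) vectors the same way.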
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase : Any = parser.parse_args()
    main(args)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
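# Worked example: 13195 = 5 * 7 * 13 * 29, so solution(13195) == 29. Trial division only needs
# to run while i * i <= n because once every factor below sqrt(n) has been divided out, whatever
# remains (if > 1) is itself prime and is therefore the largest prime factor.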
if __name__ == "__main__":
print(F"{solution() = }")
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
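# Sequence layouts produced by the methods above (BERT/XLM-style single and pair inputs):
#   single sequence:  <s> A </s>
#   pair of sequences: <s> A </s> B </s>
# with token_type_ids 0 over "<s> A </s>" and 1 over "B </s>".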
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__lowerCAmelCase = "\\n\n"
__lowerCAmelCase = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
__lowerCAmelCase = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss is natural-log based, so exponentiate the masked mean NLL with torch.exp
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type,
        )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
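# Illustrative usage of the config above (the defaults are intended to mirror the
# sail/poolformer_s12 checkpoint referenced at the top of this file):
#
#     config = PoolFormerConfig()                          # s12-style defaults
#     config = PoolFormerConfig(depths=[2, 2, 6, 2])       # four encoder stages, 12 blocks total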
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
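    # Worked example with the defaults above: image_size=30 and patch_size=2 give
    # num_patches = (30 // 2) ** 2 = 225, and with mask_ratio=0.6 the visible sequence is
    # ceil(0.4 * 226) = 91 tokens (the patches kept after masking, plus [CLS]).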
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
def _lowercase (self : str ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
def _lowercase (self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowercase (self : int ):
pass
def _lowercase (self : Any ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , tf.keras.layers.Layer ) )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__a )
def _lowercase (self : Any ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = self._prepare_for_class(__a , __a )
UpperCAmelCase_ = model(__a , noise=__a )
UpperCAmelCase_ = copy.deepcopy(self._prepare_for_class(__a , __a ) )
UpperCAmelCase_ = model(**__a , noise=__a )
UpperCAmelCase_ = outputs_dict[0].numpy()
UpperCAmelCase_ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def _lowercase (self : Tuple ):
# make the mask reproducible
np.random.seed(2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__a : int ):
UpperCAmelCase_ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__a ):
UpperCAmelCase_ = v.numpy()
else:
UpperCAmelCase_ = np.array(__a )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = self._prepare_for_class(__a , __a )
UpperCAmelCase_ = prepare_numpy_arrays(__a )
UpperCAmelCase_ = model(__a , noise=__a )
UpperCAmelCase_ = model(**__a , noise=__a )
self.assert_outputs_same(__a , __a )
def _lowercase (self : List[str] , __a : Any , __a : List[Any] , __a : Tuple ):
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase_ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCAmelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase_ = tf.constant(__a )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase_ = tf_noise
super().check_pt_tf_models(__a , __a , __a )
def _lowercase (self : Any ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__a )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(__a , __a ),)
if isinstance(__a , __a )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__a , "_keras_serializable" , __a )
}
UpperCAmelCase_ = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase_ = tf.convert_to_tensor(__a )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
UpperCAmelCase_ = main_layer_class(__a )
UpperCAmelCase_ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCAmelCase_ = tf.keras.Model(__a , outputs=main_layer(__a ) )
UpperCAmelCase_ = model(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ = os.path.join(__a , "keras_model.h5" )
model.save(__a )
UpperCAmelCase_ = tf.keras.models.load_model(
__a , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__a , tf.keras.Model )
UpperCAmelCase_ = model(__a )
self.assert_outputs_same(__a , __a )
    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
def _lowercase (self : List[str] ):
# make mask reproducible
np.random.seed(2 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = int((config.image_size // config.patch_size) ** 2 )
UpperCAmelCase_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = self._prepare_for_class(__a , __a )
UpperCAmelCase_ = model(__a , noise=__a )
UpperCAmelCase_ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__a )
UpperCAmelCase_ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCAmelCase_ = model_class.from_config(model.config )
UpperCAmelCase_ = new_model(__a ) # Build model
new_model.set_weights(model.get_weights() )
UpperCAmelCase_ = new_model(__a , noise=__a )
self.assert_outputs_same(__a , __a )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase (self : Optional[Any] ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase (self : Dict ):
pass
@slow
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(__a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def _lowercase (self : Dict ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowercase (self : Union[str, Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
UpperCAmelCase_ = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=__a , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase_ = ViTMAEConfig()
UpperCAmelCase_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase_ = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCAmelCase_ = model(**__a , noise=__a )
# verify the logits
UpperCAmelCase_ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , __a )
UpperCAmelCase_ = tf.convert_to_tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , __a , atol=1E-4 )
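# Minimal standalone usage sketch mirroring the integration test above (illustrative;
# assumes network access to the "facebook/vit-mae-base" checkpoint):
#
#   image_processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
#   model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
#   inputs = image_processor(images=prepare_img(), return_tensors="tf")
#   outputs = model(**inputs)  # logits: (1, num_patches, patch_size**2 * num_channels)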
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
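# Usage note (illustrative): with the _LazyModule indirection above, importing this
# package is cheap; the torch- and vision-backed submodules are only imported the
# first time an attribute such as `VivitModel` or `VivitImageProcessor` is accessed.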
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5],
                 image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
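    # Worked example of the resize rule above (illustrative): with
    # size = {"shortest_edge": 18, "longest_edge": 1333}, a 30x40 (h x w) image has
    # w > h, so expected_height = 18 and expected_width = int(18 * 40 / 30) = 24.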
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , "image_mean" ) )
self.assertTrue(hasattr(_a , "image_std" ) )
self.assertTrue(hasattr(_a , "do_normalize" ) )
self.assertTrue(hasattr(_a , "do_resize" ) )
self.assertTrue(hasattr(_a , "size" ) )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , _a )
lowerCAmelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_a )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , _a )
def __a ( self ) -> List[str]:
pass
def __a ( self ) -> int:
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(_a , batched=_a )
lowerCAmelCase_ = image_processing(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ = image_processing(_a , return_tensors="pt" ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self ) -> str:
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ = image_processing(_a , return_tensors="pt" ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __a ( self ) -> Optional[int]:
# Initialize image_processings
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
lowerCAmelCase_ = self.image_processing_class(do_resize=_a , do_normalize=_a , do_rescale=_a )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowerCAmelCase_ = image_processing_a.pad(_a , return_tensors="pt" )
lowerCAmelCase_ = image_processing_a(_a , return_tensors="pt" )
self.assertTrue(
torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1E-4 ) )
@slow
def __a ( self ) -> str:
# prepare image and target
lowerCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {"image_id": 39769, "annotations": target}
# encode them
lowerCAmelCase_ = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
lowerCAmelCase_ = image_processing(images=_a , annotations=_a , return_tensors="pt" )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , _a )
lowerCAmelCase_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _a , atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _a ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _a )
lowerCAmelCase_ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _a , atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _a ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _a ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _a ) )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _a ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _a ) )
@slow
def __a ( self ) -> str:
# prepare image, target and masks_path
lowerCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
lowerCAmelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCAmelCase_ = YolosImageProcessor(format="coco_panoptic" )
lowerCAmelCase_ = image_processing(images=_a , annotations=_a , masks_path=_a , return_tensors="pt" )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , _a )
lowerCAmelCase_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _a , atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _a ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _a )
lowerCAmelCase_ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _a , atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _a ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _a ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _a ) )
# verify masks
lowerCAmelCase_ = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _a )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _a ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _a ) )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
        'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMSNModel',
        'ViTMSNForImageClassification',
        'ViTMSNPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """A variance-preserving (VP) score-based SDE scheduler: integrates the
    reverse-time SDE with an Euler-Maruyama predictor step."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute the Euler-Maruyama step size and the drift/diffusion terms
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
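# Minimal sampling-loop sketch (illustrative; `score_model` is a hypothetical
# callable returning the score for a batch `x` at time `t`):
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(1_000)
#   for t in scheduler.timesteps:
#       score = score_model(x, t * torch.ones(x.shape[0], device=x.device))
#       x, x_mean = scheduler.step_pred(score, x, t)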
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5],
                 image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ) -> str:
A : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) )
def snake_case ( self ) -> List[Any]:
A : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
A : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
def snake_case ( self ) -> Dict:
pass
def snake_case ( self ) -> Tuple:
# Initialize image_processing
A : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
A : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A , A : Tuple = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A : Optional[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
A : Any = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ) -> Optional[Any]:
# Initialize image_processing
A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
A : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A , A : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : Any = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
A , A : Union[str, Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ) -> List[str]:
# Initialize image_processing
A : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
A : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
A , A : Dict = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A : int = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
A , A : Tuple = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case ( self ) -> Optional[Any]:
# prepare image and target
A : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
A : int = json.loads(f.read() )
A : Tuple = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
A : Tuple = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
A : Union[str, Any] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
A : Optional[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase )
A : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
A : Optional[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) )
# verify boxes
A : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase )
A : Tuple = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
A : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) )
# verify is_crowd
A : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) )
# verify class_labels
A : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) )
# verify orig_size
A : List[str] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) )
# verify size
A : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) )
@slow
def snake_case ( self ) -> Tuple:
# prepare image, target and masks_path
A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
A : Optional[Any] = json.loads(f.read() )
A : int = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
A : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
A : Dict = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
A : List[str] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
A : Any = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase )
A : Dict = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
A : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) )
# verify boxes
A : Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase )
A : List[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
A : int = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) )
# verify is_crowd
A : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) )
# verify class_labels
A : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) )
# verify masks
A : Dict = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __UpperCAmelCase )
# verify orig_size
A : Tuple = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) )
# verify size
A : int = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) )
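# Minimal usage sketch mirroring the integration tests above (illustrative; assumes
# network access to the "microsoft/conditional-detr-resnet-50" checkpoint):
#
#   processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
#   encoding = processor(images=image, annotations=annotations, return_tensors="pt")
#   # boxes come back normalized to [0, 1] in (center_x, center_y, width, height) format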
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10,
                 num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def A__ ( self : str ):
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCamelCase )
lowercase__ = torch.ones([self.batch_size, self.min_size, self.max_size], device=_lowerCamelCase )
lowercase__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=_lowerCamelCase ) > 0.5
).float()
lowercase__ = (torch.rand((self.batch_size, self.num_labels), device=_lowerCamelCase ) > 0.5).long()
lowercase__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self : Any ):
lowercase__ = MaskaFormerConfig(
hidden_size=self.hidden_dim, )
lowercase__ = self.num_queries
lowercase__ = self.num_labels
lowercase__ = [1, 1, 1, 1]
lowercase__ = self.num_channels
lowercase__ = 64
lowercase__ = 128
lowercase__ = self.hidden_dim
lowercase__ = self.hidden_dim
lowercase__ = self.hidden_dim
return config
def A__ ( self : Optional[Any] ):
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.prepare_config_and_inputs()
lowercase__ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def A__ ( self : Optional[Any], __lowercase : List[Any], __lowercase : Any ):
lowercase__ = output.encoder_hidden_states
lowercase__ = output.pixel_decoder_hidden_states
lowercase__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowerCamelCase ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCamelCase ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCamelCase ), config.decoder_layers )
def A__ ( self : Any, __lowercase : Optional[Any], __lowercase : Optional[int], __lowercase : int, __lowercase : List[Any]=False ):
with torch.no_grad():
lowercase__ = MaskaFormerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase__ = model(pixel_values=_lowerCamelCase, pixel_mask=_lowerCamelCase )
lowercase__ = model(_lowerCamelCase, output_hidden_states=_lowerCamelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCamelCase, _lowerCamelCase )
def A__ ( self : Tuple, __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Any, __lowercase : Tuple, __lowercase : Optional[Any] ):
lowercase__ = MaskaFormerForUniversalSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
def comm_check_on_output(__lowercase : Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase__ = model(pixel_values=_lowerCamelCase, pixel_mask=_lowerCamelCase )
lowercase__ = model(_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
lowercase__ = model(
pixel_values=_lowerCamelCase, pixel_mask=_lowerCamelCase, mask_labels=_lowerCamelCase, class_labels=_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
def A__ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def A__ ( self : List[Any] ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase, **_lowerCamelCase, output_hidden_states=_lowerCamelCase )
def A__ ( self : Union[str, Any] ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCamelCase )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def A__ ( self : Dict ):
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def A__ ( self : Tuple ):
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def A__ ( self : Tuple ):
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def A__ ( self : Tuple ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
def A__ ( self : Optional[int] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self : Union[str, Any] ):
pass
def A__ ( self : Dict ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], _lowerCamelCase )
@slow
def A__ ( self : Any ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowercase__ = MaskaFormerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def A__ ( self : Optional[int] ):
lowercase__ = (self.model_tester.min_size,) * 2
lowercase__ = {
"pixel_values": torch.randn((2, 3, *size), device=_lowerCamelCase ),
"mask_labels": torch.randn((2, 10, *size), device=_lowerCamelCase ),
"class_labels": torch.zeros(2, 10, device=_lowerCamelCase ).long(),
}
lowercase__ = self.model_tester.get_config()
lowercase__ = MaskaFormerForUniversalSegmentation(_lowerCamelCase ).to(_lowerCamelCase )
lowercase__ = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
def A__ ( self : List[str] ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase, **_lowerCamelCase, output_hidden_states=_lowerCamelCase )
def A__ ( self : str ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_lowerCamelCase ).to(_lowerCamelCase )
lowercase__ = model(**_lowerCamelCase, output_attentions=_lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def A__ ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
lowercase__ = self.all_model_classes[1]
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
lowercase__ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
lowercase__ = model(_lowerCamelCase, mask_labels=_lowerCamelCase, class_labels=_lowerCamelCase ).loss
loss.backward()
def A__ ( self : Union[str, Any] ):
lowercase__ = self.all_model_classes[1]
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
lowercase__ = True
lowercase__ = True
lowercase__ = model_class(_lowerCamelCase ).to(_lowerCamelCase )
model.train()
lowercase__ = model(_lowerCamelCase, mask_labels=_lowerCamelCase, class_labels=_lowerCamelCase )
lowercase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowercase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class _snake_case ( unittest.TestCase):
@cached_property
def A__ ( self : List[Any] ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A__ ( self : Optional[int] ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A__ ( self : Dict ):
lowercase__ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(_lowerCamelCase, return_tensors="pt" ).to(_lowerCamelCase )
lowercase__ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase, (1, 3, 384, 384) )
with torch.no_grad():
lowercase__ = model(**_lowerCamelCase )
lowercase__ = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], _lowerCamelCase, atol=_lowerCamelCase ) )
lowercase__ = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], _lowerCamelCase, atol=_lowerCamelCase ) )
lowercase__ = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], _lowerCamelCase, atol=_lowerCamelCase ) )
def A__ ( self : Tuple ):
lowercase__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(_lowerCamelCase, return_tensors="pt" ).to(_lowerCamelCase )
lowercase__ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase, (1, 3, 384, 384) )
with torch.no_grad():
lowercase__ = model(**_lowerCamelCase )
# masks_queries_logits
lowercase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowercase__ = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
lowercase__ = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], _lowerCamelCase, atol=_lowerCamelCase ) )
# class_queries_logits
lowercase__ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1) )
lowercase__ = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], _lowerCamelCase, atol=_lowerCamelCase ) )
def A__ ( self : List[Any] ):
lowercase__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
lowercase__ = self.default_image_processor
lowercase__ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )], segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )], return_tensors="pt", )
lowercase__ = inputs["pixel_values"].to(_lowerCamelCase )
lowercase__ = [el.to(_lowerCamelCase ) for el in inputs["mask_labels"]]
lowercase__ = [el.to(_lowerCamelCase ) for el in inputs["class_labels"]]
with torch.no_grad():
lowercase__ = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
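# Minimal inference sketch mirroring the integration tests above (illustrative;
# assumes network access to the checkpoint):
#
#   processor = MaskaFormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   model = MaskaFormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)  # masks_queries_logits + class_queries_logits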
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path):
    """Parse a whitespace-separated edge list into an adjacency dict."""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )
    return dict_of_neighbours
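# Expected input format (illustrative): one whitespace-separated, weighted edge per line, e.g.
#
#   a b 20
#   a c 18
#   b c 10
#
# which parses to {"a": [["b", "20"], ["c", "18"]], "b": [["a", "20"], ["c", "10"]], ...}.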
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
        end_node = start_node
        first_solution = []
        visiting = start_node
        distance_of_first_solution = 0
        while visiting not in first_solution:
            minim = 10000
            for k in dict_of_neighbours[visiting]:
                if int(k[1]) < int(minim) and k[0] not in first_solution:
                    minim = k[1]
                    best_node = k[0]
            first_solution.append(visiting)
            distance_of_first_solution = distance_of_first_solution + int(minim)
            visiting = best_node
        first_solution.append(end_node)
        position = 0
        for k in dict_of_neighbours[first_solution[-2]]:
            if k[0] == start_node:
                break
            position += 1
        distance_of_first_solution = (
            distance_of_first_solution
            + int(dict_of_neighbours[first_solution[-2]][position][1])
            - 10000
        )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate the 2-swap neighbourhood of a tour; each neighbour carries its total cost as last element."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Iteratively move to the best non-tabu neighbour, keeping a bounded tabu list of swaps."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
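    # Example invocation (illustrative; `edges.txt` is a hypothetical data file in the
    # format documented above):
    #
    #   python tabu_search.py -f edges.txt -i 100 -s 5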
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : Union[str, Any] = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class lowercase__ ( __A ):
__UpperCamelCase = """mvp"""
__UpperCamelCase = ["""past_key_values"""]
__UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , _lowercase=50_267 , _lowercase=1_024 , _lowercase=12 , _lowercase=4_096 , _lowercase=16 , _lowercase=12 , _lowercase=4_096 , _lowercase=16 , _lowercase=0.0 , _lowercase=0.0 , _lowercase="gelu" , _lowercase=1_024 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=0.0 , _lowercase=False , _lowercase=True , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase=True , _lowercase=2 , _lowercase=2 , _lowercase=False , _lowercase=100 , _lowercase=800 , **_lowercase , ):
lowerCAmelCase_ : List[Any] = vocab_size
lowerCAmelCase_ : str = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = d_model
lowerCAmelCase_ : List[Any] = encoder_ffn_dim
lowerCAmelCase_ : Tuple = encoder_layers
lowerCAmelCase_ : Optional[int] = encoder_attention_heads
lowerCAmelCase_ : str = decoder_ffn_dim
lowerCAmelCase_ : Union[str, Any] = decoder_layers
lowerCAmelCase_ : str = decoder_attention_heads
lowerCAmelCase_ : Optional[Any] = dropout
lowerCAmelCase_ : int = attention_dropout
lowerCAmelCase_ : Tuple = activation_dropout
lowerCAmelCase_ : List[Any] = activation_function
lowerCAmelCase_ : List[str] = init_std
lowerCAmelCase_ : int = encoder_layerdrop
lowerCAmelCase_ : Optional[int] = decoder_layerdrop
lowerCAmelCase_ : Any = classifier_dropout
lowerCAmelCase_ : Tuple = use_cache
lowerCAmelCase_ : int = encoder_layers
lowerCAmelCase_ : Any = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase_ : Optional[Any] = use_prompt
lowerCAmelCase_ : List[str] = prompt_length
lowerCAmelCase_ : str = prompt_mid_dim
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , _lowercase ):
lowerCAmelCase_ : str = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
"""The config can simply be saved and uploaded again to be fixed.""" )
| 440 |
from collections.abc import Generator
from math import sin
def _lowerCAmelCase ( _a : bytes ) -> bytes:
if len(_a ) != 32:
raise ValueError("""Input must be of length 32""" )
lowerCAmelCase_ : Any = B""""""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _lowerCAmelCase ( _a : int ) -> bytes:
if i < 0:
raise ValueError("""Input must be non-negative""" )
lowerCAmelCase_ : Tuple = format(_a , """08x""" )[-8:]
lowerCAmelCase_ : Any = B""""""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
return little_endian_hex
def _lowerCAmelCase ( _a : bytes ) -> bytes:
lowerCAmelCase_ : Tuple = B""""""
for char in message:
bit_string += format(_a , """08b""" ).encode("""utf-8""" )
lowerCAmelCase_ : Dict = format(len(_a ) , """064b""" ).encode("""utf-8""" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_a ) % 5_12 != 4_48:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _lowerCAmelCase ( _a : bytes ) -> Generator[list[int], None, None]:
if len(_a ) % 5_12 != 0:
raise ValueError("""Input must have length that's a multiple of 512""" )
for pos in range(0 , len(_a ) , 5_12 ):
lowerCAmelCase_ : int = bit_string[pos : pos + 5_12]
lowerCAmelCase_ : Any = []
for i in range(0 , 5_12 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _lowerCAmelCase ( _a : int ) -> int:
if i < 0:
raise ValueError("""Input must be non-negative""" )
lowerCAmelCase_ : List[str] = format(_a , """032b""" )
lowerCAmelCase_ : Optional[int] = """"""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_a , 2 )
def _lowerCAmelCase ( _a : int , _a : int ) -> int:
return (a + b) % 2**32
def _lowerCAmelCase ( _a : int , _a : int ) -> int:
if i < 0:
raise ValueError("""Input must be non-negative""" )
if shift < 0:
raise ValueError("""Shift must be non-negative""" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _lowerCAmelCase ( _a : bytes ) -> bytes:
lowerCAmelCase_ : Union[str, Any] = preprocess(_a )
lowerCAmelCase_ : Optional[int] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
lowerCAmelCase_ : Tuple = 0X67452301
lowerCAmelCase_ : Optional[int] = 0XEFCDAB89
lowerCAmelCase_ : Tuple = 0X98BADCFE
lowerCAmelCase_ : Tuple = 0X10325476
    lowerCAmelCase_ : Dict = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_a ):
lowerCAmelCase_ : int = aa
lowerCAmelCase_ : Any = ba
lowerCAmelCase_ : List[str] = ca
lowerCAmelCase_ : Optional[Any] = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
lowerCAmelCase_ : int = d ^ (b & (c ^ d))
lowerCAmelCase_ : List[Any] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
lowerCAmelCase_ : List[Any] = c ^ (d & (b ^ c))
lowerCAmelCase_ : Optional[Any] = (5 * i + 1) % 16
elif i <= 47:
lowerCAmelCase_ : Union[str, Any] = b ^ c ^ d
lowerCAmelCase_ : Optional[Any] = (3 * i + 5) % 16
else:
lowerCAmelCase_ : Any = c ^ (b | not_aa(_a ))
lowerCAmelCase_ : List[str] = (7 * i) % 16
lowerCAmelCase_ : Tuple = (f + a + added_consts[i] + block_words[g]) % 2**32
lowerCAmelCase_ : Tuple = d
lowerCAmelCase_ : Optional[Any] = c
lowerCAmelCase_ : Dict = b
lowerCAmelCase_ : List[Any] = sum_aa(_a , left_rotate_aa(_a , shift_amounts[i] ) )
# Add hashed chunk to running total
lowerCAmelCase_ : Optional[int] = sum_aa(_a , _a )
lowerCAmelCase_ : Optional[int] = sum_aa(_a , _a )
lowerCAmelCase_ : Dict = sum_aa(_a , _a )
lowerCAmelCase_ : Tuple = sum_aa(_a , _a )
lowerCAmelCase_ : int = reformat_hex(_a ) + reformat_hex(_a ) + reformat_hex(_a ) + reformat_hex(_a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
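# Sanity check of the routine above against hashlib. This is a sketch, assuming
# the top-level digest function is exported as md5_me, as in the original
# implementation (it returns the hex digest as bytes):
#
#     import hashlib
#     msg = b"The quick brown fox jumps over the lazy dog"
#     assert md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8")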
| 440 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__lowercase = False
class a__( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""")
pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""")
lowerCAmelCase = torch.manual_seed(0)
lowerCAmelCase = pipe(
image=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
lowerCAmelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 370 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Union[PIL.Image.Image, np.ndarray]
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
super().__init__()
self.register_modules(
prior=__lowerCAmelCase , image_encoder=__lowerCAmelCase , image_processor=__lowerCAmelCase , scheduler=__lowerCAmelCase , renderer=__lowerCAmelCase , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
if latents is None:
lowerCAmelCase = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase)
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
lowerCAmelCase = latents.to(__lowerCAmelCase)
lowerCAmelCase = latents * scheduler.init_noise_sigma
return latents
def a_ ( self , __lowerCAmelCase=0):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""")
lowerCAmelCase = torch.device(f"cuda:{gpu_id}")
lowerCAmelCase = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase , __lowerCAmelCase)
@property
def a_ ( self):
"""simple docstring"""
if self.device != torch.device("""meta""") or not hasattr(self.image_encoder , """_hf_hook"""):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__lowerCAmelCase , """_hf_hook""")
and hasattr(module._hf_hook , """execution_device""")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase) and isinstance(image[0] , torch.Tensor):
lowerCAmelCase = torch.cat(__lowerCAmelCase , axis=0) if image[0].ndim == 4 else torch.stack(__lowerCAmelCase , axis=0)
if not isinstance(__lowerCAmelCase , torch.Tensor):
lowerCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors="""pt""").pixel_values[0].unsqueeze(0)
lowerCAmelCase = image.to(dtype=self.image_encoder.dtype , device=__lowerCAmelCase)
lowerCAmelCase = self.image_encoder(__lowerCAmelCase)["""last_hidden_state"""]
lowerCAmelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCAmelCase = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0)
if do_classifier_free_guidance:
lowerCAmelCase = torch.zeros_like(__lowerCAmelCase)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds])
return image_embeds
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase)
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = 25 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 4.0 , __lowerCAmelCase = 64 , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , PIL.Image.Image):
lowerCAmelCase = 1
elif isinstance(__lowerCAmelCase , torch.Tensor):
lowerCAmelCase = image.shape[0]
elif isinstance(__lowerCAmelCase , __lowerCAmelCase) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)):
lowerCAmelCase = len(__lowerCAmelCase)
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__lowerCAmelCase)}")
lowerCAmelCase = self._execution_device
lowerCAmelCase = batch_size * num_images_per_prompt
lowerCAmelCase = guidance_scale > 1.0
lowerCAmelCase = self._encode_image(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
# prior
self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase)
lowerCAmelCase = self.scheduler.timesteps
lowerCAmelCase = self.prior.config.num_embeddings
lowerCAmelCase = self.prior.config.embedding_dim
lowerCAmelCase = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCAmelCase = latents.reshape(latents.shape[0] , __lowerCAmelCase , __lowerCAmelCase)
for i, t in enumerate(self.progress_bar(__lowerCAmelCase)):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowerCAmelCase = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.prior(
__lowerCAmelCase , timestep=__lowerCAmelCase , proj_embedding=__lowerCAmelCase , ).predicted_image_embedding
# remove the variance
lowerCAmelCase , lowerCAmelCase = noise_pred.split(
scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCAmelCase , lowerCAmelCase = noise_pred.chunk(2)
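                # classifier-free guidance: push the prediction away from the
                # unconditional branch by `guidance_scale` along the conditional direction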
lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCAmelCase = self.scheduler.step(
__lowerCAmelCase , timestep=__lowerCAmelCase , sample=__lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__lowerCAmelCase)
lowerCAmelCase = []
for i, latent in enumerate(__lowerCAmelCase):
lowerCAmelCase = self.renderer.decode(
latent[None, :] , __lowerCAmelCase , size=__lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(__lowerCAmelCase)
lowerCAmelCase = torch.stack(__lowerCAmelCase)
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
lowerCAmelCase = images.cpu().numpy()
if output_type == "pil":
lowerCAmelCase = [self.numpy_to_pil(__lowerCAmelCase) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__lowerCAmelCase)
| 370 | 1 |
from __future__ import annotations
def A ( __UpperCamelCase ) -> list[int]:
    return [ord(elem) - 96 for elem in plain]
def A ( __UpperCamelCase ) -> str:
return "".join(chr(elem + 96 ) for elem in encoded )
def A ( ) -> None:
A__ = encode(input('-> ' ).strip().lower() )
print('Encoded: ' , lowerCamelCase__ )
print('Decoded:' , decode(lowerCamelCase__ ) )
if __name__ == "__main__":
main()
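# Round-trip sketch for the helpers above, assuming the names used inside
# main() (encode/decode):
#
#     assert encode("hello") == [8, 5, 12, 12, 15]
#     assert decode([8, 5, 12, 12, 15]) == "hello"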
| 710 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : List[str] = "roberta"
def __init__( self : List[str] , _snake_case : Union[str, Any]=5_02_65 , _snake_case : List[Any]=7_68 , _snake_case : List[str]=12 , _snake_case : List[str]=12 , _snake_case : Any=30_72 , _snake_case : Union[str, Any]="gelu" , _snake_case : int=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=5_12 , _snake_case : Union[str, Any]=2 , _snake_case : Any=0.02 , _snake_case : Any=1E-12 , _snake_case : List[Any]=1 , _snake_case : int=0 , _snake_case : Any=2 , _snake_case : Optional[Any]="absolute" , _snake_case : int=True , _snake_case : Any=None , **_snake_case : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@property
def _a ( self : Dict ):
"""simple docstring"""
if self.task == "multiple-choice":
A__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
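# Usage sketch for the ONNX config above, assuming the classes are exported as
# RobertaConfig and RobertaOnnxConfig, as in transformers
# (transformers.models.roberta.configuration_roberta):
#
#     config = RobertaConfig()
#     onnx_config = RobertaOnnxConfig(config, task="sequence-classification")
#     print(onnx_config.inputs)   # OrderedDict with dynamic batch/sequence axes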
| 52 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__SCREAMING_SNAKE_CASE :Optional[Any] = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :str = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :str = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :Any = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
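# The _LazyModule pattern above keeps `import transformers` cheap: submodules
# and classes are resolved only on first attribute access. A sketch of the
# effect:
#
#     import transformers
#     blip = transformers.models.blip      # nothing heavy is imported yet
#     processor_cls = blip.BlipProcessor   # this attribute access triggers the real import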
| 236 |
'''simple docstring'''
import math
import sys
def UpperCAmelCase_ ( __lowercase : str ) -> str:
'''simple docstring'''
_UpperCAmelCase = ""
try:
with open(__lowercase , "rb" ) as binary_file:
_UpperCAmelCase = binary_file.read()
for dat in data:
_UpperCAmelCase = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def UpperCAmelCase_ ( __lowercase : str ) -> str:
'''simple docstring'''
_UpperCAmelCase = {"0": "0", "1": "1"}
_UpperCAmelCase , _UpperCAmelCase = "", ""
_UpperCAmelCase = len(__lowercase )
for i in range(len(__lowercase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_UpperCAmelCase = lexicon[curr_string]
result += last_match_id
_UpperCAmelCase = last_match_id + "0"
if math.loga(__lowercase ).is_integer():
_UpperCAmelCase = {}
for curr_key in list(__lowercase ):
_UpperCAmelCase = lexicon.pop(__lowercase )
_UpperCAmelCase = new_lex
_UpperCAmelCase = last_match_id + "1"
index += 1
_UpperCAmelCase = ""
return result
def UpperCAmelCase_ ( __lowercase : str , __lowercase : str ) -> None:
'''simple docstring'''
_UpperCAmelCase = 8
try:
with open(__lowercase , "wb" ) as opened_file:
_UpperCAmelCase = [
to_write[i : i + byte_length]
for i in range(0 , len(__lowercase ) , __lowercase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__lowercase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def UpperCAmelCase_ ( __lowercase : str ) -> str:
'''simple docstring'''
_UpperCAmelCase = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
_UpperCAmelCase = data_bits[counter:]
_UpperCAmelCase = data_bits[counter + 1 :]
return data_bits
def UpperCAmelCase_ ( __lowercase : str , __lowercase : str ) -> None:
'''simple docstring'''
_UpperCAmelCase = read_file_binary(__lowercase )
_UpperCAmelCase = remove_prefix(__lowercase )
_UpperCAmelCase = decompress_data(__lowercase )
write_file_binary(__lowercase , __lowercase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
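# The per-byte bit-string conversion used by the file reader above, shown in
# isolation (the original helper is named read_file_binary):
#
#     data = bytes([0b10100001, 0b00000011])
#     bits = "".join(f"{byte:08b}" for byte in data)
#     assert bits == "1010000100000011"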
| 236 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase):
'''simple docstring'''
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__) -> str:
'''simple docstring'''
snake_case__ : Dict = jnp.ones((batch_size, length)) / length
return scores
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
snake_case__ : int = None
snake_case__ : Optional[Any] = 20
snake_case__ : Optional[int] = self._get_uniform_logits(batch_size=2 , length=lowerCamelCase__)
# tweak scores to not be uniform anymore
snake_case__ : Dict = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
snake_case__ : List[str] = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
snake_case__ : int = jax.nn.softmax(lowerCamelCase__ , axis=-1)
snake_case__ : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5)
snake_case__ : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3)
snake_case__ : Dict = jax.nn.softmax(temp_dist_warper_sharper(lowerCamelCase__ , scores.copy() , cur_len=lowerCamelCase__) , axis=-1)
snake_case__ : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(lowerCamelCase__ , scores.copy() , cur_len=lowerCamelCase__) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[int] = None
snake_case__ : int = 10
snake_case__ : int = 2
# create ramp distribution
snake_case__ : Optional[Any] = np.broadcast_to(np.arange(lowerCamelCase__)[None, :] , (batch_size, vocab_size)).copy()
snake_case__ : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
snake_case__ : Optional[Any] = FlaxTopKLogitsWarper(3)
snake_case__ : List[Any] = top_k_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
snake_case__ : Any = 5
snake_case__ : Tuple = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
snake_case__ : Any = np.broadcast_to(np.arange(lowerCamelCase__)[None, :] , (batch_size, length)).copy()
snake_case__ : List[Any] = top_k_warp_safety_check(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[int] = None
snake_case__ : List[Any] = 10
snake_case__ : Any = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
snake_case__ : Union[str, Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
snake_case__ : Any = FlaxTopPLogitsWarper(0.8)
snake_case__ : Any = np.exp(top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
snake_case__ : Tuple = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3))
# check edge cases with negative and extreme logits
snake_case__ : Union[str, Any] = np.broadcast_to(np.arange(lowerCamelCase__)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
snake_case__ : Optional[Any] = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
snake_case__ : List[Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
snake_case__ : Optional[int] = top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = 20
snake_case__ : int = 4
snake_case__ : Dict = 0
snake_case__ : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase__)
# check that min length is applied at length 5
snake_case__ : Optional[Any] = ids_tensor((batch_size, 20) , vocab_size=20)
snake_case__ : Dict = 5
snake_case__ : Dict = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__)
snake_case__ : int = min_dist_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
snake_case__ : List[Any] = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__)
snake_case__ : List[str] = 15
snake_case__ : Dict = min_dist_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
self.assertFalse(jnp.isinf(lowerCamelCase__).any())
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
snake_case__ : str = 20
snake_case__ : Union[str, Any] = 4
snake_case__ : List[str] = 0
snake_case__ : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase__)
# check that all scores are -inf except the bos_token_id score
snake_case__ : int = ids_tensor((batch_size, 1) , vocab_size=20)
snake_case__ : Union[str, Any] = 1
snake_case__ : List[str] = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__)
snake_case__ : List[Any] = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
snake_case__ : Optional[int] = 3
snake_case__ : Optional[Any] = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__)
snake_case__ : List[Any] = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
self.assertFalse(jnp.isinf(lowerCamelCase__).any())
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
snake_case__ : List[Any] = 20
snake_case__ : List[str] = 4
snake_case__ : Tuple = 0
snake_case__ : Optional[int] = 5
snake_case__ : int = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase__ , eos_token_id=lowerCamelCase__)
# check that all scores are -inf except the eos_token_id when max_length is reached
snake_case__ : List[Any] = ids_tensor((batch_size, 4) , vocab_size=20)
snake_case__ : Union[str, Any] = 4
snake_case__ : str = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__)
snake_case__ : str = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
snake_case__ : Union[str, Any] = 3
snake_case__ : Dict = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__)
snake_case__ : Dict = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
self.assertFalse(jnp.isinf(lowerCamelCase__).any())
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
snake_case__ : Any = 4
snake_case__ : int = 10
snake_case__ : List[str] = 15
snake_case__ : Dict = 2
snake_case__ : Optional[Any] = 1
snake_case__ : List[str] = 15
# dummy input_ids and scores
snake_case__ : Union[str, Any] = ids_tensor((batch_size, sequence_length) , lowerCamelCase__)
snake_case__ : Any = input_ids.copy()
snake_case__ : Any = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__)
snake_case__ : Union[str, Any] = scores.copy()
# instantiate all dist processors
snake_case__ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5)
snake_case__ : int = FlaxTopKLogitsWarper(3)
snake_case__ : int = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
snake_case__ : Any = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase__)
snake_case__ : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase__)
snake_case__ : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase__ , eos_token_id=lowerCamelCase__)
snake_case__ : Union[str, Any] = 10
# no processor list
snake_case__ : Optional[int] = temp_dist_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
snake_case__ : List[Any] = top_k_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
snake_case__ : str = top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
snake_case__ : Optional[int] = min_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
snake_case__ : Tuple = bos_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
snake_case__ : List[str] = eos_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
# with processor list
snake_case__ : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
snake_case__ : Dict = processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
snake_case__ : Tuple = 4
snake_case__ : List[Any] = 10
snake_case__ : int = 15
snake_case__ : Any = 2
snake_case__ : Dict = 1
snake_case__ : List[Any] = 15
# dummy input_ids and scores
snake_case__ : Optional[int] = ids_tensor((batch_size, sequence_length) , lowerCamelCase__)
snake_case__ : Optional[int] = input_ids.copy()
snake_case__ : List[Any] = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__)
snake_case__ : Dict = scores.copy()
# instantiate all dist processors
snake_case__ : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5)
snake_case__ : List[Any] = FlaxTopKLogitsWarper(3)
snake_case__ : Any = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
snake_case__ : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase__)
snake_case__ : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase__)
snake_case__ : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase__ , eos_token_id=lowerCamelCase__)
snake_case__ : List[Any] = 10
# no processor list
def run_no_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__):
snake_case__ : Optional[int] = temp_dist_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
snake_case__ : Union[str, Any] = top_k_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
snake_case__ : Union[str, Any] = top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
snake_case__ : Optional[int] = min_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
snake_case__ : Tuple = bos_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
snake_case__ : List[Any] = eos_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
return scores
# with processor list
def run_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__):
snake_case__ : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
snake_case__ : Tuple = processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__)
return scores
snake_case__ : Optional[Any] = jax.jit(lowerCamelCase__)
snake_case__ : Any = jax.jit(lowerCamelCase__)
snake_case__ : Tuple = jitted_run_no_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__)
snake_case__ : int = jitted_run_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
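# Minimal standalone sketch of composing the same warpers outside the test,
# assuming the current transformers Flax generation API:
#
#     import jax.numpy as jnp
#     from transformers.generation import (
#         FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper)
#     scores = jnp.log(jnp.array([[0.1, 0.2, 0.3, 0.4]]))
#     input_ids = jnp.zeros((1, 1), dtype=jnp.int32)
#     processors = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(2)])
#     warped = processors(input_ids, scores, cur_len=1)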
| 150 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
lowercase = """"""
lowercase = """"""
lowercase = """"""
lowercase = """"""
def A__ ( _UpperCAmelCase : str ) -> None:
'''simple docstring'''
snake_case__ : Any = tweepy.OAuthHandler(_UpperCAmelCase , _UpperCAmelCase )
auth.set_access_token(_UpperCAmelCase , _UpperCAmelCase )
snake_case__ : Any = tweepy.API(_UpperCAmelCase )
# initialize a list to hold all the tweepy Tweets
snake_case__ : int = []
# make initial request for most recent tweets (200 is the maximum allowed count)
snake_case__ : Optional[int] = api.user_timeline(screen_name=_UpperCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(_UpperCAmelCase )
# save the id of the oldest tweet less one
snake_case__ : int = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_UpperCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
snake_case__ : int = api.user_timeline(
screen_name=_UpperCAmelCase , count=2_00 , max_id=_UpperCAmelCase )
# save most recent tweets
alltweets.extend(_UpperCAmelCase )
# update the id of the oldest tweet less one
snake_case__ : List[Any] = alltweets[-1].id - 1
print(F"""...{len(_UpperCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
snake_case__ : List[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , "w" ) as f:
snake_case__ : Optional[Any] = csv.writer(_UpperCAmelCase )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(_UpperCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
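# tweepy's Cursor helper expresses the same max_id pagination more compactly.
# A sketch, with the same credentials assumed (the v1.1 user_timeline endpoint
# returns at most roughly the 3200 most recent tweets either way):
#
#     for tweet in tweepy.Cursor(
#         api.user_timeline, screen_name=screen_name, count=200
#     ).items():
#         ...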
| 150 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def a__ ( A__, A__, A__, A__, A__=True, A__="pt" ):
SCREAMING_SNAKE_CASE_ : Tuple = {'add_prefix_space': True} if isinstance(A__, A__ ) and not line.startswith(' ' ) else {}
SCREAMING_SNAKE_CASE_ : Dict = padding_side
return tokenizer(
[line], max_length=A__, padding='max_length' if pad_to_max_length else None, truncation=A__, return_tensors=A__, add_special_tokens=A__, **A__, )
def a__ ( A__, A__, A__=None, ):
SCREAMING_SNAKE_CASE_ : int = input_ids.ne(A__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="train" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="" , ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCAmelCase__ ).joinpath(type_path + '.source' )
SCREAMING_SNAKE_CASE_ : Optional[int] = Path(lowerCAmelCase__ ).joinpath(type_path + '.target' )
SCREAMING_SNAKE_CASE_ : int = self.get_char_lens(self.src_file )
SCREAMING_SNAKE_CASE_ : int = max_source_length
SCREAMING_SNAKE_CASE_ : Dict = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
SCREAMING_SNAKE_CASE_ : Any = tokenizer
SCREAMING_SNAKE_CASE_ : List[str] = prefix
if n_obs is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.src_lens[:n_obs]
SCREAMING_SNAKE_CASE_ : Optional[Any] = src_lang
SCREAMING_SNAKE_CASE_ : List[Any] = tgt_lang
def __len__( self ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = index + 1 # linecache starts at 1
SCREAMING_SNAKE_CASE_ : List[str] = self.prefix + linecache.getline(str(self.src_file ) , lowerCAmelCase__ ).rstrip('\n' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = linecache.getline(str(self.tgt_file ) , lowerCAmelCase__ ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCAmelCase__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
SCREAMING_SNAKE_CASE_ : Any = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCAmelCase__ ) else self.tokenizer
)
SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer.generator if isinstance(self.tokenizer , lowerCAmelCase__ ) else self.tokenizer
SCREAMING_SNAKE_CASE_ : Dict = encode_line(lowerCAmelCase__ , lowerCAmelCase__ , self.max_source_length , 'right' )
SCREAMING_SNAKE_CASE_ : Tuple = encode_line(lowerCAmelCase__ , lowerCAmelCase__ , self.max_target_length , 'right' )
SCREAMING_SNAKE_CASE_ : List[str] = source_inputs['input_ids'].squeeze()
SCREAMING_SNAKE_CASE_ : Dict = target_inputs['input_ids'].squeeze()
SCREAMING_SNAKE_CASE_ : str = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCamelCase__ ( lowerCAmelCase__ ):
"""simple docstring"""
return [len(lowerCAmelCase__ ) for x in Path(lowerCAmelCase__ ).open().readlines()]
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.stack([x['input_ids'] for x in batch] )
SCREAMING_SNAKE_CASE_ : int = torch.stack([x['attention_mask'] for x in batch] )
SCREAMING_SNAKE_CASE_ : List[str] = torch.stack([x['decoder_input_ids'] for x in batch] )
SCREAMING_SNAKE_CASE_ : Dict = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase__ )
else self.tokenizer.pad_token_id
)
SCREAMING_SNAKE_CASE_ : List[Any] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase__ )
else self.tokenizer.pad_token_id
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = trim_batch(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = trim_batch(lowerCAmelCase__ , lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
lowerCAmelCase__ : Tuple =getLogger(__name__)
def a__ ( A__ ):
return list(itertools.chain.from_iterable(A__ ) )
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = get_git_info()
save_json(A__, os.path.join(A__, 'git_log.json' ) )
def a__ ( A__, A__, A__=4, **A__ ):
with open(A__, 'w' ) as f:
json.dump(A__, A__, indent=A__, **A__ )
def a__ ( A__ ):
with open(A__ ) as f:
return json.load(A__ )
def a__ ( ):
SCREAMING_SNAKE_CASE_ : List[str] = git.Repo(search_parent_directories=A__ )
SCREAMING_SNAKE_CASE_ : Any = {
'repo_id': str(A__ ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def a__ ( A__, A__ ):
return list(map(A__, A__ ) )
def a__ ( A__, A__ ):
with open(A__, 'wb' ) as f:
return pickle.dump(A__, A__ )
def a__ ( A__ ):
def remove_articles(A__ ):
return re.sub(r'\b(a|an|the)\b', ' ', A__ )
def white_space_fix(A__ ):
return " ".join(text.split() )
def remove_punc(A__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(A__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(A__ ) ) ) )
def a__ ( A__, A__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = normalize_answer(A__ ).split()
SCREAMING_SNAKE_CASE_ : List[str] = normalize_answer(A__ ).split()
SCREAMING_SNAKE_CASE_ : Optional[int] = Counter(A__ ) & Counter(A__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sum(common.values() )
if num_same == 0:
return 0
SCREAMING_SNAKE_CASE_ : List[Any] = 1.0 * num_same / len(A__ )
SCREAMING_SNAKE_CASE_ : int = 1.0 * num_same / len(A__ )
SCREAMING_SNAKE_CASE_ : List[str] = (2 * precision * recall) / (precision + recall)
return fa
def a__ ( A__, A__ ):
return normalize_answer(A__ ) == normalize_answer(A__ )
def a__ ( A__, A__ ):
assert len(A__ ) == len(A__ )
SCREAMING_SNAKE_CASE_ : Any = 0
for hypo, pred in zip(A__, A__ ):
em += exact_match_score(A__, A__ )
if len(A__ ) > 0:
em /= len(A__ )
return {"em": em}
def a__ ( A__ ):
return model_prefix.startswith('rag' )
def a__ ( A__, A__, A__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
SCREAMING_SNAKE_CASE_ : List[Any] = 'dropout_rate'
for p in extra_params:
if getattr(A__, A__, A__ ):
if not hasattr(A__, A__ ) and not hasattr(A__, equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(A__ ) )
delattr(A__, A__ )
continue
SCREAMING_SNAKE_CASE_ : Any = p if hasattr(A__, A__ ) else equivalent_param[p]
setattr(A__, A__, getattr(A__, A__ ) )
delattr(A__, A__ )
return hparams, config
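# Quick check of the SQuAD-style scoring helpers above, assuming the original
# names normalize_answer / f1_score:
#
#     assert normalize_answer("The  Quick, Brown Fox!") == "quick brown fox"
#     f1_score("new york city", "york city")   # precision 2/3, recall 1.0 -> f1 = 0.8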
| 101 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCAmelCase__ : Any ='''Create a default config file for Accelerate with only a few flags set.'''
def __lowercase ( a__="no" , a__ = default_json_config_file , a__ = False ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = Path(a__ )
path.parent.mkdir(parents=a__ , exist_ok=a__ )
if path.exists():
print(
f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
__SCREAMING_SNAKE_CASE = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
__SCREAMING_SNAKE_CASE = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
__SCREAMING_SNAKE_CASE = torch.cuda.device_count()
__SCREAMING_SNAKE_CASE = num_gpus
__SCREAMING_SNAKE_CASE = False
if num_gpus > 1:
__SCREAMING_SNAKE_CASE = 'MULTI_GPU'
else:
__SCREAMING_SNAKE_CASE = 'NO'
elif is_xpu_available() and use_xpu:
__SCREAMING_SNAKE_CASE = torch.xpu.device_count()
__SCREAMING_SNAKE_CASE = num_xpus
__SCREAMING_SNAKE_CASE = False
if num_xpus > 1:
__SCREAMING_SNAKE_CASE = 'MULTI_XPU'
else:
__SCREAMING_SNAKE_CASE = 'NO'
elif is_npu_available():
__SCREAMING_SNAKE_CASE = torch.npu.device_count()
__SCREAMING_SNAKE_CASE = num_npus
__SCREAMING_SNAKE_CASE = False
if num_npus > 1:
__SCREAMING_SNAKE_CASE = 'MULTI_NPU'
else:
__SCREAMING_SNAKE_CASE = 'NO'
else:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 'NO'
__SCREAMING_SNAKE_CASE = ClusterConfig(**a__ )
config.to_json_file(a__ )
return path
def __lowercase ( a__ , a__ ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = parser.add_parser('default' , parents=a__ , help=a__ , formatter_class=a__ )
parser.add_argument(
'--config_file' , default=a__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=a__ , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=a__ )
return parser
def __lowercase ( a__ ) -> int:
__SCREAMING_SNAKE_CASE = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(f"""accelerate configuration saved at {config_file}""" )
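# The same helper is exposed publicly by accelerate. A usage sketch:
#
#     from accelerate.utils import write_basic_config
#     write_basic_config(mixed_precision="fp16")   # writes default_config.yaml
#                                                  # under the HF cache location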
| 148 | 0 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce the amount of console output from TensorFlow
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 570 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a__ : Tuple = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[Any] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
a__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 570 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
UpperCAmelCase = '''src/transformers'''
UpperCAmelCase = '''docs/source/en/tasks'''
def lowerCamelCase (a_ :List[Any] , a_ :Optional[int] , a_ :List[str]) -> int:
with open(a_ , '''r''' , encoding='''utf-8''' , newline='''\n''') as f:
lowercase :int = f.readlines()
# Find the start prompt.
lowercase :List[str] = 0
while not lines[start_index].startswith(a_):
start_index += 1
start_index += 1
lowercase :Optional[int] = start_index
while not lines[end_index].startswith(a_):
end_index += 1
end_index -= 1
while len(lines[start_index]) <= 1:
start_index += 1
while len(lines[end_index]) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
UpperCAmelCase = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
UpperCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def lowerCamelCase (a_ :Any) -> Any:
lowercase :int = TASK_GUIDE_TO_MODELS[task_guide]
lowercase :Optional[Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(a_ , set())
lowercase :Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()]) + "\n"
def lowerCamelCase (a_ :List[Any] , a_ :List[Any]=False) -> List[str]:
lowercase , lowercase , lowercase , lowercase :Optional[int] = _find_text_in_file(
filename=os.path.join(a_ , a_) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
lowercase :List[str] = get_model_list_for_task(a_)
if current_list != new_list:
if overwrite:
with open(os.path.join(a_ , a_) , '''w''' , encoding='''utf-8''' , newline='''\n''') as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
''' to fix this.''')
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
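
# For reference, a minimal sketch of the `_find_text_in_file` helper used above (the
# real helper lives in utils/check_copies.py; the body below is illustrative, not the
# canonical implementation). It returns the text between two prompt lines together
# with the indices needed to splice in a replacement:
#
#     def _find_text_in_file(filename, start_prompt, end_prompt):
#         with open(filename, "r", encoding="utf-8", newline="\n") as f:
#             lines = f.readlines()
#         start_index = 0
#         while not lines[start_index].startswith(start_prompt):
#             start_index += 1
#         start_index += 1
#         end_index = start_index
#         while not lines[end_index].startswith(end_prompt):
#             end_index += 1
#         return "".join(lines[start_index:end_index]), start_index, end_index, lines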
| 677 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
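
# Note on the `sys.modules` swap above: `_LazyModule` defers the heavy torch/flax
# imports until an attribute is first accessed. A standalone sketch of the same idea
# using PEP 562 module-level `__getattr__` (illustrative only; the real transformers
# machinery also handles `dir()`, pickling and dummy objects):
#
#     import importlib
#
#     def __getattr__(name):
#         if name in {"LongT5Model", "LongT5ForConditionalGeneration"}:
#             module = importlib.import_module(".modeling_longt5", __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")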
| 677 | 1 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
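

def _transposition_example():
    # Small self-check of the reference transposition done in `_compute` above: this
    # metric takes one list of references *per prediction*, while sacrebleu expects one
    # reference *stream* per reference index, covering every prediction. Illustrative only.
    refs_per_prediction = [["ref a1", "ref a2"], ["ref b1", "ref b2"]]
    streams = [[refs[i] for refs in refs_per_prediction] for i in range(2)]
    assert streams == [["ref a1", "ref b1"], ["ref a2", "ref b2"]]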
| 706 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info("Could not locate the feature extractor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
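

# Usage sketch for the `register` hook above, so `AutoFeatureExtractor.from_pretrained`
# can resolve a custom class. `CustomConfig` and `CustomFeatureExtractor` are
# hypothetical names used purely for illustration:
#
#     from transformers import AutoConfig, AutoFeatureExtractor
#
#     AutoConfig.register("custom-model", CustomConfig)
#     AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)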
| 134 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
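

def _conversational_usage_example():
    # End-to-end usage sketch for the pipeline above. The checkpoint name is an
    # assumption (any conversational checkpoint works); calling this downloads a model.
    from transformers import pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
    conversation = Conversation("What is the best way to learn Python?")
    conversation = chatbot(conversation)
    return conversation.generated_responses[-1]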
| 35 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
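

# Usage sketch: `datasets` consumes this template through `Dataset.prepare_for_task`,
# which applies `column_mapping` and casts the result to `input_schema` + `label_schema`.
# The call below is illustrative and version-dependent (the API was later deprecated):
#
#     from datasets import load_dataset
#
#     squad = load_dataset("squad", split="train")
#     squad = squad.prepare_for_task("question-answering-extractive")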
| 359 | 0 |
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax: subtract the row-wise max before exponentiating."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 400 |
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest to `sample` (squared Euclidean distance)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 < d1 else 1

    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        """Move the winning weight vector `j` toward the sample with learning rate `alpha`."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # training examples
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
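
# One update step worked by hand (alpha = 0.5): for sample x = [1, 1, 0, 0] and the
# winning weight vector w = [0.2, 0.6, 0.5, 0.9], each component moves halfway toward
# the sample, w_i + 0.5 * (x_i - w_i), giving [0.6, 0.8, 0.25, 0.45].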
| 400 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 37 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
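

def _encoder_decoder_usage_example():
    # Usage sketch: these classes are meant to be paired inside an EncoderDecoderModel
    # (the checkpoint is the one used in the tests above; loading it downloads weights).
    from transformers import EncoderDecoderModel

    encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    decoder = BertGenerationDecoder.from_pretrained(
        "google/bert_for_seq_generation_L-24_bbc_encoder", add_cross_attention=True, is_decoder=True
    )
    return EncoderDecoderModel(encoder=encoder, decoder=decoder)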
| 169 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
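

def _sigma_schedule_example(sigma_min=0.02, sigma_max=100.0, num_inference_steps=50):
    # Closed-form equivalent of the loop in `set_timesteps` above: a geometric
    # interpolation between sigma_max**2 and sigma_min**2 (numpy used for clarity;
    # illustrative helper, not part of the scheduler API).
    import numpy as np

    i = np.arange(num_inference_steps)[::-1]
    return sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))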
| 704 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
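

def _zero_mean_unit_var_example(audio):
    # Minimal equivalent of the invariant checked by the last test above: after
    # normalization, mean ~ 0 and var ~ 1 regardless of input scale. Illustrative only;
    # the real helper also handles padded positions via the attention mask.
    return (audio - audio.mean()) / np.sqrt(audio.var() + 1e-7)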
| 148 | 0 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """A bunch of sanity checks on the training arguments, performed before anything starts."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
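

def _soft_target_loss_sketch(student_logits, teacher_logits, temperature):
    # Context for the alpha weights checked above: `alpha_ce` scales the
    # temperature-softened distillation term sketched here. The real computation lives
    # in distiller.py and additionally applies masking and the other weighted losses.
    import torch.nn.functional as F

    return F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * (temperature**2)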
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embds(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser(description='Training' )
parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' , type=A__ , required=A__ , help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file' , type=A__ , required=A__ , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
parser.add_argument(
'--student_type' , type=A__ , choices=['distilbert', 'roberta', 'gpt2'] , required=A__ , help='The student type (DistilBERT, RoBERTa).' , )
parser.add_argument('--student_config' , type=A__ , required=A__ , help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' , default=A__ , type=A__ , help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=A__ , help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name' , type=A__ , required=A__ , help='The teacher model.' )
parser.add_argument('--temperature' , default=2.0 , type=A__ , help='Temperature for the softmax temperature.' )
parser.add_argument(
'--alpha_ce' , default=0.5 , type=A__ , help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' , default=0.0 , type=A__ , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
parser.add_argument('--alpha_clm' , default=0.5 , type=A__ , help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' , default=0.0 , type=A__ , help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' , default=0.0 , type=A__ , help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' , default=0.15 , type=A__ , help='Proportion of tokens for which we need to make a prediction.' , )
parser.add_argument('--word_mask' , default=0.8 , type=A__ , help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' , default=0.1 , type=A__ , help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' , default=0.1 , type=A__ , help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing' , default=0.7 , type=A__ , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
parser.add_argument('--token_counts' , type=A__ , help='The token counts in the data_file for MLM.' )
parser.add_argument(
'--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , )
parser.add_argument(
'--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
parser.add_argument(
'--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
    parser.add_argument('--n_epoch' , type=int , default=3 , help='Number of passes over the whole dataset.' )
parser.add_argument('--batch_size' , type=A__ , default=5 , help='Batch size (for each process).' )
parser.add_argument(
        '--group_by_size' , action='store_false' , help='Passing this flag disables grouping sequences of similar length into the same batch (grouping is enabled by default).' , )
parser.add_argument(
'--gradient_accumulation_steps' , type=A__ , default=50 , help='Gradient accumulation for larger training batches.' , )
parser.add_argument('--warmup_prop' , default=0.05 , type=A__ , help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' , default=0.0 , type=A__ , help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' , default=5e-4 , type=A__ , help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' , default=1e-6 , type=A__ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , default=5.0 , type=A__ , help='Max gradient norm.' )
parser.add_argument('--initializer_range' , default=0.02 , type=A__ , help='Random initialization range.' )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=A__ , default='O1' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_gpu' , type=A__ , default=1 , help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' , type=A__ , default=-1 , help='Distributed training - Local rank' )
parser.add_argument('--seed' , type=A__ , default=56 , help='Random seed' )
parser.add_argument('--log_interval' , type=A__ , default=500 , help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' , type=A__ , default=4_000 , help='Checkpoint interval.' )
UpperCamelCase = parser.parse_args()
sanity_checks(A__ )
# ARGS #
init_gpu_params(A__ )
set_seed(A__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
' itUse `--force` if you want to overwrite it' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
json.dump(vars(A__ ) , A__ , indent=4 )
git_log(args.dump_path )
UpperCamelCase , UpperCamelCase , UpperCamelCase = MODEL_CLASSES[args.student_type]
UpperCamelCase , UpperCamelCase , UpperCamelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCamelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCamelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCamelCase = tokenizer.all_special_tokens.index(A__ )
UpperCamelCase = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
UpperCamelCase = special_tok_ids
UpperCamelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , 'rb' ) as fp:
UpperCamelCase = pickle.load(A__ )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , 'rb' ) as fp:
UpperCamelCase = pickle.load(A__ )
UpperCamelCase = np.maximum(A__ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCamelCase = 0.0 # do not predict special tokens
UpperCamelCase = torch.from_numpy(A__ )
else:
UpperCamelCase = None
UpperCamelCase = LmSeqsDataset(params=A__ , data=A__ )
logger.info('Data loader created.' )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
UpperCamelCase = student_config_class.from_pretrained(args.student_config )
UpperCamelCase = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCamelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=A__ )
else:
UpperCamelCase = student_model_class(A__ )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info('Student loaded.' )
# TEACHER #
UpperCamelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A__ )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A__ , A__ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A__ , A__ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCamelCase = Distiller(
params=A__ , dataset=A__ , token_probs=A__ , student=A__ , teacher=A__ )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
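# Illustrative invocation (a sketch only -- the file names, config paths and loss
# weights below are hypothetical placeholders, not values taken from this script):
#
#   python train.py --force --dump_path serialization_dir/my_distillation \
#       --data_file data/binarized_text.pkl --token_counts data/token_counts.pkl \
#       --student_type distilbert --student_config student_config.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0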
| 430 |
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials` independent Bernoulli trials."""
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('successes and trials must be integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in the exclusive range (0, 1)' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
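    # sanity check: C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375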
| 430 | 1 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    """Locate the leftmost insertion point for `item` in `sorted_collection`."""
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    """Locate the rightmost insertion point for `item` in `sorted_collection`."""
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    """Insert `item` before the leftmost occurrence of equal items."""
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    """Insert `item` after the rightmost occurrence of equal items."""
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search( sorted_collection: list[int] , item: int ) -> int | None:
    """Iterative binary search; returns the index of `item` or None if it is absent."""
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib( sorted_collection: list[int] , item: int ) -> int | None:
    """Binary search that delegates to the standard library's bisect module."""
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion( sorted_collection: list[int] , item: int , left: int , right: int ) -> int | None:
    """Recursive binary search over sorted_collection[left:right + 1]."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
| 709 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Optional[int] = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
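# `_import_structure` maps each submodule to the public names it provides; the
# torch-dependent modeling module below is only imported when one of those names
# is actually accessed (or when type checking).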
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 124 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T] ):
    # a node of a disjoint-set forest
    def __init__( self , data ):
        self.data = data
        self.parent = self  # every node starts as the root of its own set
        self.rank = 0
class DisjointSetTree(Generic[T] ):
    def __init__( self ):
        # map from node name to the node object
        self.map = {}

    def make_set( self , data ):
        # create a new set with `data` as its only member
        self.map[data] = DisjointSetTreeNode(data )
    def find_set( self , data ):
        # find the set `data` belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent
    def link( self , nodea , nodeb ):
        # helper function for union: attach the lower-rank root under the higher-rank root
        if nodea.rank > nodeb.rank:
            nodeb.parent = nodea
        else:
            nodea.parent = nodeb
            if nodea.rank == nodeb.rank:
                nodeb.rank += 1
    def union( self , dataa , datab ):
        # merge 2 disjoint sets
        self.link(self.find_set(dataa ) , self.find_set(datab ) )
class GraphUndirectedWeighted(Generic[T] ):
    def __init__( self ):
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections = {}

    def add_node( self , node ):
        # add a node ONLY if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge( self , nodea , nodeb , weight ):
        # add an undirected edge with the given weight
        self.add_node(nodea )
        self.add_node(nodeb )
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight
    def kruskal( self ):
        # generate a minimum spanning tree (MST) with Kruskal's algorithm
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
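# Minimal usage sketch (illustrative values only):
#
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 10)
#   mst = g.kruskal()  # keeps edges (1, 2) and (2, 3); total weight 3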
| 29 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowerCamelCase : Dict = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
inspect_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase = path + ".py"
assert script_name in os.listdir(lowerCAmelCase_ )
assert "__pycache__" not in os.listdir(lowerCAmelCase_ )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
inspect_metric(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase = path + ".py"
assert script_name in os.listdir(lowerCAmelCase_ )
assert "__pycache__" not in os.listdir(lowerCAmelCase_ )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = get_dataset_config_info(lowerCAmelCase_ , config_name=lowerCAmelCase_ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase_ ):
get_dataset_config_info(lowerCAmelCase_ , config_name=lowerCAmelCase_ )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = get_dataset_config_names(lowerCAmelCase_ )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = get_dataset_infos(lowerCAmelCase_ )
assert list(infos.keys() ) == expected_configs
lowercase = expected_configs[0]
assert expected_config in infos
lowercase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = get_dataset_infos(lowerCAmelCase_ )
assert expected_config in infos
lowercase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase_ ):
get_dataset_split_names(lowerCAmelCase_ , config_name=lowerCAmelCase_ )
| 310 | 0 |
"""simple docstring"""
import unittest
import numpy as np
def schur_complement( mat_a: np.ndarray , mat_b: np.ndarray , mat_c: np.ndarray , pseudo_inv: np.ndarray | None = None , ) -> np.ndarray:
    """Compute the Schur complement of mat_a in the block matrix [[mat_a, mat_b], [mat_b.T, mat_c]]."""
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        a_b_error = (
            "Expected the same number of rows for A and B. "
            F"""Instead found A of size {shape_a} and B of size {shape_b}"""
        )
        raise ValueError(a_b_error )
    if shape_b[1] != shape_c[1]:
        b_c_error = (
            "Expected the same number of columns for B and C. "
            F"""Instead found B of size {shape_b} and C of size {shape_c}"""
        )
        raise ValueError(b_c_error )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement." )
    return mat_c - mat_b.T @ a_inv @ mat_b
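# For M = [[A, B], [B.T, C]], det(M) = det(A) * det(S) where S is the Schur
# complement computed above; test_schur_complement verifies this identity
# numerically.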
class TestSchurComplement(unittest.TestCase ):
    """Unit tests for schur_complement."""
    def test_schur_complement( self ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
    def test_improper_a_b_dimensions( self ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            # passing c (2x2) in b's place trips the row-count check
            schur_complement(a , c , b )
    def test_improper_b_c_dimensions( self ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )  # 2x3: column count no longer matches b
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 500 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "segformer"
def __init__( self: Optional[Any] , UpperCamelCase: Union[str, Any]=3 , UpperCamelCase: int=4 , UpperCamelCase: int=[2, 2, 2, 2] , UpperCamelCase: List[str]=[8, 4, 2, 1] , UpperCamelCase: List[Any]=[32, 64, 1_60, 2_56] , UpperCamelCase: Any=[7, 3, 3, 3] , UpperCamelCase: Union[str, Any]=[4, 2, 2, 2] , UpperCamelCase: Tuple=[1, 2, 5, 8] , UpperCamelCase: Optional[int]=[4, 4, 4, 4] , UpperCamelCase: Dict="gelu" , UpperCamelCase: Optional[Any]=0.0 , UpperCamelCase: List[str]=0.0 , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: int=0.1 , UpperCamelCase: Optional[int]=1e-6 , UpperCamelCase: int=2_56 , UpperCamelCase: Union[str, Any]=2_55 , **UpperCamelCase: Optional[Any] , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCamelCase , )
A__ = num_channels
A__ = num_encoder_blocks
A__ = depths
A__ = sr_ratios
A__ = hidden_sizes
A__ = patch_sizes
A__ = strides
A__ = mlp_ratios
A__ = num_attention_heads
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = classifier_dropout_prob
A__ = initializer_range
A__ = drop_path_rate
A__ = layer_norm_eps
A__ = decoder_hidden_size
A__ = kwargs.get("""reshape_last_stage""" , UpperCamelCase )
A__ = semantic_loss_ignore_index
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = version.parse("1.11" )
@property
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
return 1e-4
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return 12
| 500 | 1 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # up
    [0, -1],  # left
    [1, 0],  # down
    [0, 1],  # right
]
def search( grid , init , goal , cost , heuristic , ) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # f = g + h: cost so far plus the heuristic estimate to the goal
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError("Algorithm is unable to find solution" )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )

    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
lowercase : Optional[Any] = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowercase : Tuple = [0, 0]
# all coordinates are given in format [y,x]
lowercase : List[Any] = [len(grid) - 1, len(grid[0]) - 1]
lowercase : List[str] = 1
# the cost map which pushes the path closer to the goal
lowercase : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowercase : Union[str, Any] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowercase : int = 9_9
lowercase , lowercase : List[str] = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
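    # `action` records, for each expanded cell, the index (into DIRECTIONS) of the
    # move that led into it; `search` walks this map backwards from the goal to
    # rebuild `path`.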
| 649 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowerCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
def __A ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "width_multiplier" ) )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]=1_3 , SCREAMING_SNAKE_CASE : int=6_4 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : Any=3 , SCREAMING_SNAKE_CASE : Dict="swish" , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : Any=3_2 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.0_2 , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : int=1_0 , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Optional[Any]=0.2_5 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE : int=0.0 , ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
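        # MobileViTV2 scales the final hidden size with width_multiplier, rounded
        # to a multiple of 8 by make_divisible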
lowerCAmelCase = make_divisible(5_1_2 * width_multiplier , divisor=8 )
lowerCAmelCase = hidden_act
lowerCAmelCase = conv_kernel_size
lowerCAmelCase = output_stride
lowerCAmelCase = classifier_dropout_prob
lowerCAmelCase = use_labels
lowerCAmelCase = is_training
lowerCAmelCase = num_labels
lowerCAmelCase = initializer_range
lowerCAmelCase = scope
lowerCAmelCase = width_multiplier
lowerCAmelCase = ffn_dropout
lowerCAmelCase = attn_dropout
def __A ( self : int ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def __A ( self : int ) -> Dict:
"""simple docstring"""
return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def __A ( self : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
lowerCAmelCase = MobileViTVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] ) -> str:
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCAmelCase = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self : Tuple ) -> Any:
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def __A ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase = MobileViTVaModelTester(self )
lowerCAmelCase = MobileViTVaConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE )
def __A ( self : Tuple ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def __A ( self : str ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def __A ( self : Dict ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def __A ( self : Tuple ) -> Dict:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def __A ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __A ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
def __A ( self : Any ) -> str:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(SCREAMING_SNAKE_CASE )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __A ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowerCAmelCase = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = outputs.hidden_states
lowerCAmelCase = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCAmelCase = 2
for i in range(len(SCREAMING_SNAKE_CASE ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __A ( self : Tuple ) -> int:
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def __A ( self : Dict ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE )
@slow
def __A ( self : Any ) -> Dict:
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __a ( ) -> List[Any]:
lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : str ) -> Any:
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def __A ( self : List[str] ) -> int:
"""simple docstring"""
lowerCAmelCase = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowerCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def __A ( self : Any ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
lowerCAmelCase = model.to(SCREAMING_SNAKE_CASE )
lowerCAmelCase = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**SCREAMING_SNAKE_CASE )
lowerCAmelCase = outputs.logits
# verify the logits
lowerCAmelCase = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def __A ( self : Any ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
lowerCAmelCase = model.to(SCREAMING_SNAKE_CASE )
lowerCAmelCase = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**SCREAMING_SNAKE_CASE )
lowerCAmelCase = outputs.logits.detach().cpu()
lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE , target_sizes=[(5_0, 6_0)] )
lowerCAmelCase = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE )
lowerCAmelCase = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE )
| 649 | 1 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowercase : Any = logging.get_logger(__name__)
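# MAPPING translates fairseq parameter-name fragments (keys) to their Hugging Face
# Transformers counterparts (values); the "*" wildcard stands for the encoder layer
# index and is filled in while the weights are loaded.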
lowercase : Union[str, Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
lowercase : Tuple = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A , __A ) -> List[str]:
for attribute in key.split('.' ):
_snake_case = getattr(__A , __A )
if weight_type is not None:
_snake_case = getattr(__A , __A ).shape
else:
_snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
_snake_case = value
elif weight_type == "weight_g":
_snake_case = value
elif weight_type == "weight_v":
_snake_case = value
elif weight_type == "bias":
_snake_case = value
else:
_snake_case = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> Dict:
_snake_case = []
_snake_case = fairseq_model.state_dict()
_snake_case = hf_model.feature_extractor
_snake_case = hf_model.adapter
for name, value in fairseq_dict.items():
_snake_case = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == 'group' , )
_snake_case = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(__A , __A , __A , __A )
_snake_case = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_snake_case = True
if "*" in mapped_key:
_snake_case = name.split(__A )[0].split('.' )[-2]
_snake_case = mapped_key.replace('*' , __A )
if "weight_g" in name:
_snake_case = 'weight_g'
elif "weight_v" in name:
_snake_case = 'weight_v'
elif "bias" in name:
_snake_case = 'bias'
elif "weight" in name:
_snake_case = 'weight'
else:
_snake_case = None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(F'Unused weights: {unused_weights}' )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A , __A ) -> Dict:
_snake_case = full_name.split('conv_layers.' )[-1]
_snake_case = name.split('.' )
_snake_case = int(items[0] )
_snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_snake_case = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_snake_case = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_snake_case = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_snake_case = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__A )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A ) -> Optional[Any]:
_snake_case = full_name.split('adaptor.' )[-1]
_snake_case = name.split('.' )
if items[1].isdigit():
_snake_case = int(items[1] )
else:
_snake_case = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
_snake_case = value
logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
_snake_case = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
_snake_case = value
logger.info(F'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
_snake_case = value
logger.info(F'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(__A , __A ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
_snake_case = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
_snake_case = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' )
else:
unused_weights.append(__A )
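# Build a linear LM head whose weight matrix is copied from the decoder's
# token-embedding matrix (weight tying).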
def SCREAMING_SNAKE_CASE__ ( __A ) -> int:
_snake_case , _snake_case = emb.weight.shape
_snake_case = nn.Linear(__A , __A , bias=__A )
_snake_case = emb.weight.data
return lin_layer
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Any:
_snake_case = WavaVecaConfig.from_pretrained(
__A , add_adapter=__A , adapter_stride=__A , adapter_kernel_size=__A , use_auth_token=__A , output_hidden_size=__A , )
_snake_case = MBartConfig.from_pretrained(__A )
# load model
_snake_case , _snake_case , _snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
_snake_case = model[0].eval()
# load feature extractor
_snake_case = WavaVecaFeatureExtractor.from_pretrained(__A , use_auth_token=__A )
# set weights for wav2vec2 encoder
_snake_case = WavaVecaModel(__A )
recursively_load_weights_wavaveca(model.encoder , __A )
# load decoder weights
_snake_case = MBartForCausalLM(__A )
_snake_case , _snake_case = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__A )
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
_snake_case = SpeechEncoderDecoderModel(encoder=__A , decoder=__A )
_snake_case = False
_snake_case = MBartaaTokenizer(__A )
tokenizer.save_pretrained(__A )
_snake_case = hf_wavavec.config.to_dict()
_snake_case = tokenizer.pad_token_id
_snake_case = tokenizer.bos_token_id
_snake_case = tokenizer.eos_token_id
_snake_case = 'mbart50'
_snake_case = 'wav2vec2'
_snake_case = tokenizer.eos_token_id
_snake_case = 250_004
_snake_case = tokenizer.eos_token_id
_snake_case = SpeechEncoderDecoderConfig.from_dict(__A )
hf_wavavec.save_pretrained(__A )
feature_extractor.save_pretrained(__A )
if __name__ == "__main__":
lowercase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=25_0004, type=int, help="`decoder_start_token_id` of model config")
lowercase : List[str] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 700 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication( a: list , b: list ) -> list:
    """Multiply two 2x2 matrices directly (the base case of Strassen's recursion)."""
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception('Matrices are not 2x2' )
_snake_case = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def matrix_addition( matrix_a: list , matrix_b: list ):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def matrix_subtraction( matrix_a: list , matrix_b: list ):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix( a: list ) -> tuple[list, list, list, list]:
    """Split a (2^n x 2^n) matrix into four equal quadrants."""
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception('Odd matrices are not supported!' )

    matrix_length = len(a )
    mid = matrix_length // 2

    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
    bot_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions( matrix: list ) -> tuple[int, int]:
    return len(matrix ), len(matrix[0] )
def print_matrix( matrix: list ) -> None:
    print('\n'.join(str(line ) for line in matrix ) )
def actual_strassen( matrix_a: list , matrix_b: list ) -> list:
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )

    a , b , c , d = split_matrix(matrix_a )
    e , f , g , h = split_matrix(matrix_b )

    t1 = actual_strassen(a , matrix_subtraction(f , h ) )
    t2 = actual_strassen(matrix_addition(a , b ) , h )
    t3 = actual_strassen(matrix_addition(c , d ) , e )
    t4 = actual_strassen(d , matrix_subtraction(g , e ) )
    t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
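# The seven recursive calls above (t1..t7) replace the eight block products of the
# naive algorithm, which is what gives Strassen multiplication its
# O(n^log2(7)) ~ O(n^2.81) running time.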
def strassen( matrixa: list , matrixb: list ) -> list:
    if matrix_dimensions(matrixa )[1] != matrix_dimensions(matrixb )[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            F'Matrix A: {matrixa}\n'
            F'Matrix B: {matrixb}'
        )
        raise Exception(msg )
    dimensiona = matrix_dimensions(matrixa )
    dimensionb = matrix_dimensions(matrixb )

    # note: when both inputs are already square the function returns them unchanged
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]

    maximum = max(*dimensiona , *dimensionb )
    maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
    new_matrixa = matrixa
    new_matrixb = matrixb

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , maxim ):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )
        if i < dimensionb[0]:
            for _ in range(dimensionb[1] , maxim ):
                new_matrixb[i].append(0 )
        else:
            new_matrixb.append([0] * maxim )

    final_matrix = actual_strassen(new_matrixa , new_matrixb )

    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrixa = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixb))
| 542 | 0 |
import cv2
import numpy as np
class HarrisCorner:
    def __init__( self , k: float , window_size: int ):
        """k: Harris free parameter, usually 0.04-0.06; window_size: size of the sliding window."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value" )

    def __str__( self ):
        return str(self.k )
    def detect( self , img_path: str ):
        """Return (color_img, corner_list) for the grayscale image at img_path."""
        img = cv2.imread(img_path , 0)
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB)
        dy , dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # note: this local k shadows the value passed to the constructor
        offset = self.window_size // 2
        for y in range(offset , h - offset):
            for x in range(offset , w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # Harris corner response: R = det(M) - k * trace(M)^2
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0) , 0)
                    color_img.itemset((y, x, 1) , 0)
                    color_img.itemset((y, x, 2) , 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 654 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = ''''''
_lowercase : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_lowercase : str = None # compression type in fsspec. ex: "gzip"
_lowercase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
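    # Each concrete subclass below fills in `protocol` (the fsspec URL scheme),
    # `compression` (the codec name handed to `fsspec.open`) and `extension`
    # (stripped off to recover the uncompressed file name).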
    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a gzip file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
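# A minimal usage sketch, not part of the original module: once a filesystem
# class above is registered with fsspec, a compressed file can be read through
# fsspec URL chaining. The local path "data.txt.gz" is a placeholder.
if __name__ == "__main__":
    fsspec.register_implementation(GzipFileSystem.protocol, GzipFileSystem, clobber=True)
    with fsspec.open("gzip://data.txt::file://./data.txt.gz", "rt") as f:
        print(f.read())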
| 654 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests that carry no explicit marker as unit tests by default
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
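# Illustration only (test names hypothetical): with the collection hook above,
# an unmarked test is auto-marked as a unit test, so `pytest -m unit` selects
# it, while integration tests have to opt in explicitly.
#
#   @pytest.mark.integration
#   def test_talks_to_the_hub(): ...
#
#   def test_pure_logic():  # collected with pytest.mark.unit automatically
#       assert 1 + 1 == 2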
| 472 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 472 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 275 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 275 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 280 |
'''simple docstring'''
import numpy as np
def runge_kutta(f, ya, xa, h, x_end):
    """Classic fourth-order Runge-Kutta: integrate y' = f(x, y) from (xa, ya) up to x_end with step h."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
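
# Sanity check of the reconstruction above (signature as re-derived from the body):
# integrating y' = y from y(0) = 1 to x = 1 should approximate e ~ 2.71828.
if __name__ == "__main__":
    approximation = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(approximation[-1])  # ~2.7182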
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 280 | 1 |
class DisjointSet:
    """Disjoint-set (union-find) that also tracks the size of the largest set."""

    def __init__(self, set_counts: list) -> None:
        # Initialize with a list of the number of items in each set
        # and with rank = 1 for each set
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; return False if already merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
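
# Minimal usage check of the reconstruction above: three sets of sizes
# 1, 2 and 3; merging set 0 into set 2 grows the largest set to 4.
if __name__ == "__main__":
    ds = DisjointSet([1, 2, 3])
    ds.merge(0, 2)
    print(ds.max_set)  # 4
    print(ds.get_parent(0))  # 2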
| 61 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 298 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 707 |
'''simple docstring'''
import functools
def min_distance_up_bottom(worda: str, wordb: str) -> int:
    """Top-down memoized edit (Levenshtein) distance between worda and wordb."""
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
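
# Quick check of the reconstruction above (function name assumed):
# the classic "intention" -> "execution" example needs 5 edits.
if __name__ == "__main__":
    print(min_distance_up_bottom("intention", "execution"))  # 5
    print(min_distance_up_bottom("", "abc"))  # 3 insertions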
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 425 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''gpt2''': 1024,
'''gpt2-medium''': 1024,
'''gpt2-large''': 1024,
'''gpt2-xl''': 1024,
'''distilgpt2''': 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
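
# Hedged usage sketch (checkpoint name "gpt2" as published on the Hub):
# byte-level BPE round-trips exactly, so decode(encode(x)) == x.
if __name__ == "__main__":
    tok = GPT2TokenizerFast.from_pretrained("gpt2")
    ids = tok("Hello world").input_ids
    assert tok.decode(ids) == "Hello world"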
| 499 |
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of value, or its derivative if deriv is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
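
# Rough convergence check of the sketch above (tolerance band assumed):
# with enough propagations the output should land near the expected value.
if __name__ == "__main__":
    res = forward_propagation(32, 450000)
    print(31 < res < 33)  # True most of the time; the weight init is random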
| 499 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""ConditionalDetrFeatureExtractor"""]
snake_case = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488 |
def topological_sort(graph):
    """Kahn's algorithm: print a topological order of the adjacency-list graph, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)  # prints [0, 1, 2, 3, 4, 5]
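
# A graph with a cycle, to exercise the other branch (expected: "Cycle exists"):
topological_sort({0: [1], 1: [2], 2: [0]})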
| 488 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 507 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 507 | 1 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        short_edge_length (list[int]): range of possible target sizes for the shorter edge
        max_size (int): maximum allowed length for the longer edge
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
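
# --- Added usage sketch (not part of the original file) ---
# A minimal demonstration of ResizeShortestEdge above; the image is random
# data and the (800, 1333) sizes are illustrative defaults only.
if __name__ == "__main__":
    aug = ResizeShortestEdge([800, 800], max_size=1333)
    dummy = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # HWC uint8 image
    resized = aug([dummy])[0]
    print(resized.shape)  # shortest edge becomes 800, aspect ratio preserved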
| 313 |
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
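
# Added illustration (not from the original solution): the factorial-digit
# chain that starts at 69 runs 69 -> 363600 -> 1454 -> 169 -> 363601, after
# which 1454 repeats, so its non-repeating length is 5.
assert digit_factorial_sum(69) == 363600
assert digit_factorial_sum(169) == 363601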
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 313 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , UpperCamelCase_=128112 , UpperCamelCase_=1024 , UpperCamelCase_=12 , UpperCamelCase_=4096 , UpperCamelCase_=16 , UpperCamelCase_=12 , UpperCamelCase_=4096 , UpperCamelCase_=16 , UpperCamelCase_=0.05 , UpperCamelCase_=0.05 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=1024 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.02 , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_="float32" , UpperCamelCase_=False , UpperCamelCase_=128 , UpperCamelCase_=64 , UpperCamelCase_=4 , UpperCamelCase_=4 , UpperCamelCase_=0.001 , UpperCamelCase_=0.001 , UpperCamelCase_="all" , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=1.0 , UpperCamelCase_=0.2 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=False , **UpperCamelCase_ , ) -> List[Any]:
"""simple docstring"""
a_ : Union[str, Any] = vocab_size
a_ : Union[str, Any] = max_position_embeddings
a_ : List[Any] = d_model
a_ : Dict = encoder_ffn_dim
a_ : List[str] = encoder_layers
a_ : Dict = encoder_attention_heads
a_ : Union[str, Any] = decoder_ffn_dim
a_ : List[str] = decoder_layers
a_ : Optional[int] = decoder_attention_heads
a_ : Any = dropout
a_ : str = attention_dropout
a_ : Any = activation_dropout
a_ : Tuple = activation_function
a_ : Optional[Any] = init_std
a_ : int = encoder_layerdrop
a_ : Union[str, Any] = decoder_layerdrop
a_ : int = use_cache
a_ : Optional[int] = encoder_layers
a_ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
a_ : Optional[Any] = router_z_loss_coef
a_ : int = router_aux_loss_coef
a_ : List[Any] = decoder_sparse_step
a_ : Optional[Any] = encoder_sparse_step
a_ : List[str] = num_experts
a_ : int = expert_capacity
a_ : Optional[Any] = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
a_ : Union[str, Any] = router_dtype
a_ : Optional[int] = router_ignore_padding_tokens
a_ : Any = batch_prioritized_routing
a_ : Dict = second_expert_policy
a_ : str = normalize_router_prob_before_dropping
a_ : str = moe_eval_capacity_token_fraction
a_ : Optional[Any] = moe_token_dropout
a_ : Union[str, Any] = output_router_logits
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
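
# Illustrative instantiation (added; the values shown are the defaults of the
# signature above):
#     config = NllbMoeConfig(num_experts=128, expert_capacity=64)
#     config.model_type   # "nllb-moe"
#     config.d_model      # 1024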
| 419 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")

    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    """simple docstring"""
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
| 419 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 472 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase__ ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a = StableDiffusionPanoramaPipeline
a = TEXT_TO_IMAGE_PARAMS
a = TEXT_TO_IMAGE_BATCH_PARAMS
a = TEXT_TO_IMAGE_IMAGE_PARAMS
a = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase_ ( self : Union[str, Any] ) -> List[str]:
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__ = DDIMScheduler()
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str]=0 ) -> Any:
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe(**__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Any ) -> Optional[Any]:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self : List[Any] ) -> List[Any]:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def lowercase_ ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''french fries'''
SCREAMING_SNAKE_CASE__ = sd_pipe(**__lowerCamelCase , negative_prompt=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe(**__lowerCamelCase , view_batch_size=2 )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe(**__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = PNDMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe(**__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Tuple ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Any , __lowerCamelCase : Dict=0 ) -> Dict:
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = '''stabilityai/stable-diffusion-2-base'''
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_pretrained(__lowerCamelCase , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
SCREAMING_SNAKE_CASE__ = pipe(**__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def lowercase_ ( self : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
SCREAMING_SNAKE_CASE__ = pipe(**__lowerCamelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase_ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = 0
def callback_fn(__lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : torch.FloatTensor ) -> None:
SCREAMING_SNAKE_CASE__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
SCREAMING_SNAKE_CASE__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
SCREAMING_SNAKE_CASE__ = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
SCREAMING_SNAKE_CASE__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
SCREAMING_SNAKE_CASE__ = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = '''stabilityai/stable-diffusion-2-base'''
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_pretrained(__lowerCamelCase , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
pipe(**__lowerCamelCase , callback=__lowerCamelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase_ ( self : Tuple ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ = '''stabilityai/stable-diffusion-2-base'''
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_pretrained(__lowerCamelCase , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline.from_pretrained(__lowerCamelCase , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
SCREAMING_SNAKE_CASE__ = pipe(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 472 | 1 |
class CircularQueue:
    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
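
# Quick usage demo (added for illustration; not part of the original class):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b")
    assert len(queue) == 2
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"
    assert queue.is_empty()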
| 40 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
def __init__( self, lowerCamelCase=None, lowerCamelCase=9_00, lowerCamelCase=20_48, lowerCamelCase=6, lowerCamelCase=20_48, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=True, lowerCamelCase=3_00, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, **lowerCamelCase, ) -> Any:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
_lowercase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
else:
if isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : Dict = backbone_config.pop('model_type')
_lowercase : int = CONFIG_MAPPING[backbone_model_type]
_lowercase : Union[str, Any] = config_class.from_dict(lowerCamelCase)
_lowercase : Union[str, Any] = backbone_config
_lowercase : Any = num_queries
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Union[str, Any] = d_model
_lowercase : Optional[int] = encoder_ffn_dim
_lowercase : Optional[int] = encoder_layers
_lowercase : Optional[Any] = encoder_attention_heads
_lowercase : Optional[Any] = decoder_ffn_dim
_lowercase : Dict = decoder_layers
_lowercase : Tuple = decoder_attention_heads
_lowercase : Union[str, Any] = dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : int = activation_dropout
_lowercase : Tuple = activation_function
_lowercase : List[Any] = init_std
_lowercase : Union[str, Any] = init_xavier_std
_lowercase : int = encoder_layerdrop
_lowercase : Optional[int] = auxiliary_loss
_lowercase : Dict = position_embedding_type
# deformable attributes
_lowercase : Any = num_feature_levels
_lowercase : str = encoder_n_points
_lowercase : Any = decoder_n_points
_lowercase : List[str] = two_stage
_lowercase : Dict = two_stage_num_proposals
_lowercase : Any = with_box_refine
_lowercase : List[Any] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
_lowercase : List[Any] = class_cost
_lowercase : Optional[int] = bbox_cost
_lowercase : str = giou_cost
# Loss coefficients
_lowercase : Optional[int] = mask_loss_coefficient
_lowercase : int = dice_loss_coefficient
_lowercase : List[Any] = bbox_loss_coefficient
_lowercase : Optional[Any] = giou_loss_coefficient
_lowercase : str = eos_coefficient
_lowercase : int = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)
    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """simple docstring"""
        return self.d_model

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
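
# Illustrative use (added): with no arguments the config falls back to a
# ResNet backbone and the defaults above; the attribute_map aliases apply.
#     config = DetaConfig()
#     config.num_attention_heads  # 8, alias of encoder_attention_heads
#     config.hidden_size          # 256, alias of d_model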
| 89 | 0 |
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[F'''{prefix}.{param_name}'''] = state_dict[F'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = F'''{prefix}.embeddings.{w}.weight'''
            compressed_sd[param_name] = state_dict[param_name]
for w in ["weight", "bias"]:
            param_name = F'''{prefix}.embeddings.LayerNorm.{w}'''
            compressed_sd[param_name] = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
                    compressed_sd[F'''{prefix}.h.{std_idx}.{layer}.{w}'''] = state_dict[
                        F'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
                    ]
            compressed_sd[F'''{prefix}.h.{std_idx}.attn.bias'''] = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
                    compressed_sd[F'''{prefix}.encoder.layer.{std_idx}.{layer}.{w}'''] = state_dict[
                        F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
                    ]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[F'''{layer}'''] = state_dict[F'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
                compressed_sd[F'''lm_head.dense.{w}'''] = state_dict[F'''lm_head.dense.{w}''']
                compressed_sd[F'''lm_head.layer_norm.{w}'''] = state_dict[F'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
            compressed_sd[F'''{prefix}.ln_f.{w}'''] = state_dict[F'''{prefix}.ln_f.{w}''']
        compressed_sd["""lm_head.weight"""] = state_dict["""lm_head.weight"""]
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
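    # Example invocation (added; the checkpoint path is a placeholder, mirroring
    # the argparse defaults defined above):
    #   python extract.py --model_type roberta --model_name roberta-large \
    #       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform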
    torch.save(compressed_sd, args.dump_checkpoint)
| 382 |
def z_function(input_str: str) -> list[int]:
    """simple docstring"""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if the new index's result extends the interval to the right,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """simple docstring"""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
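
# Small added demonstration (values verified by hand): the pattern "abr"
# occurs twice in "abracadabra", at indices 0 and 7.
assert z_function("aaaa") == [0, 3, 2, 1]
assert find_pattern("abr", "abracadabra") == 2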
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 382 | 1 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0

            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False

            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0

            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 63 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16_000, padding_value=0.0, hop_length=10, win_length=25, win_function="hamming_window", frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs):
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        '''simple docstring'''
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        '''simple docstring'''
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        '''simple docstring'''
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug."""
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
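
# Hedged usage sketch (added): extracting log-mel features from one second of
# random audio; the shapes and numbers are illustrative only.
#     import numpy as np
#     extractor = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16_000)
#     speech = np.random.randn(16_000).astype(np.float32)
#     features = extractor(speech, sampling_rate=16_000, return_tensors="np")
#     features["input_features"].shape  # (1, num_frames, 80)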
| 65 | 0 |
"""simple docstring"""
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    '''simple docstring'''
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    '''simple docstring'''
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    '''simple docstring'''
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    '''simple docstring'''
    return round(float((pressure * volume) / (0.0821 * moles)))
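
# Worked example (added): with PV = nRT and R = 0.0821 L*atm/(mol*K),
# 2 moles at 300 K in a 24.63 L vessel exert about 2 atm.
assert moles_to_pressure(volume=24.63, moles=2, temperature=300) == 2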
if __name__ == "__main__":
import doctest
doctest.testmod()
| 133 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    '''simple docstring'''
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 133 | 1 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase__ ( ConfigTester ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , "num_attention_heads" ) )
class lowercase__ :
'''simple docstring'''
def __init__( self : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : int=13 , _UpperCAmelCase : Union[str, Any]=64 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : Dict=[128, 256, 384] , _UpperCAmelCase : int=[4, 6, 8] , _UpperCAmelCase : Dict=[2, 3, 4] , _UpperCAmelCase : Union[str, Any]=[16, 16, 16] , _UpperCAmelCase : int=0 , _UpperCAmelCase : Tuple=[2, 2, 2] , _UpperCAmelCase : Tuple=[2, 2, 2] , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : str=2 , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = kernel_size
UpperCAmelCase_ = stride
UpperCAmelCase_ = padding
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = depths
UpperCAmelCase_ = key_dim
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = attention_ratio
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = initializer_range
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = LevitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
UpperCAmelCase_ = (self.image_size, self.image_size)
UpperCAmelCase_ , UpperCAmelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCAmelCase_ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCAmelCase_ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def lowercase__ ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = LevitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = LevitModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not output attentions" )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ):
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = len(self.model_tester.depths ) + 1
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
UpperCAmelCase_ = (self.model_tester.image_size, self.model_tester.image_size)
UpperCAmelCase_ , UpperCAmelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCAmelCase_ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCAmelCase_ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
def lowercase__ ( self : str , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple=False ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCAmelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCAmelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ = False
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
UpperCAmelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCAmelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
UpperCAmelCase_ = problem_type["title"]
UpperCAmelCase_ = problem_type["num_labels"]
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCAmelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
UpperCAmelCase_ = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCAmelCase_ = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
UpperCAmelCase_ = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = LevitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : List[str] ) -> str:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
UpperCAmelCase_ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor([1.0448, -0.3745, -1.8317] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 82 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError('math domain error')

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    """simple docstring"""
    return math.pow(x, z - 1) * math.exp(-x)
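
# Sanity check (added): for positive integers gamma(n) equals (n - 1)!,
# e.g. gamma(5) == 24 up to numerical integration error.
assert abs(gamma(5) - 24.0) < 1e-6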
if __name__ == "__main__":
from doctest import testmod
testmod()
| 570 | 0 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(1_00, 0.25) = }""")
print(F"""{price_plus_tax(125.50, 0.05) = }""")
| 714 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =["image_processor", "tokenizer"]
UpperCAmelCase_ ="Pix2StructImageProcessor"
UpperCAmelCase_ =("T5Tokenizer", "T5TokenizerFast")
def __init__( self , _A , _A ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = False
super().__init__(_A , _A )
def __call__( self , _A=None , _A = None , _A = True , _A = False , _A = None , _A = None , _A = 2048 , _A = 0 , _A = None , _A = None , _A = False , _A = False , _A = False , _A = False , _A = False , _A = True , _A = None , **_A , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None and not self.image_processor.is_vqa:
SCREAMING_SNAKE_CASE_ = self.tokenizer
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
SCREAMING_SNAKE_CASE_ = self.image_processor(
_A , return_tensors=_A , max_patches=_A , **_A )
else:
# add pixel_values and bbox
SCREAMING_SNAKE_CASE_ = self.image_processor(
_A , return_tensors=_A , max_patches=_A , header_text=_A , **_A )
if text is not None and not self.image_processor.is_vqa:
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
if "attention_mask" in text_encoding:
SCREAMING_SNAKE_CASE_ = text_encoding.pop('''attention_mask''' )
if "input_ids" in text_encoding:
SCREAMING_SNAKE_CASE_ = text_encoding.pop('''input_ids''' )
else:
SCREAMING_SNAKE_CASE_ = None
if text_encoding is not None:
encoding_image_processor.update(_A )
return encoding_image_processor
def _UpperCamelCase ( self , *_A , **_A ) -> int:
return self.tokenizer.batch_decode(*_A , **_A )
def _UpperCamelCase ( self , *_A , **_A ) -> List[str]:
return self.tokenizer.decode(*_A , **_A )
@property
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 597 | 0 |
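A hedged usage sketch of the processor class above; the checkpoint name and image path are illustrative, and the output keys follow the Pix2Struct convention (flattened image patches plus decoder-side text ids when text is passed):

from PIL import Image
from transformers import Pix2StructProcessor  # the upstream class this snippet mirrors

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")  # illustrative checkpoint
image = Image.open("example.png")  # hypothetical local file
inputs = processor(images=image, text="Describe the figure", return_tensors="pt")
print(inputs.keys())  # expected: flattened_patches, attention_mask, decoder_input_ids, decoder_attention_mask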
"""simple docstring"""
def join(separator: str, separated: list) -> str:
    '''simple docstring'''
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined" )
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 636 |
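A quick check of the reconstructed join() above:

assert join("", ["a", "b", "c"]) == "abc"
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"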
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__a : List[str] = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float list-of-lists with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class SpeechTaFeatureExtractionTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , do_normalize=True , num_mel_bins=80 , hop_length=16 , win_length=64 , win_function="hann_window" , fmin=80 , fmax=7600 , mel_floor=1E-10 , return_attention_mask=True , ) -> int:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict( self ) -> Any:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> List[str]:
        """simple docstring"""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target( self , equal_length=False , numpify=False ) -> Optional[int]:
        """simple docstring"""
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """simple docstring"""
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp( self ) -> Tuple:
        """simple docstring"""
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ) -> List[Any]:
        """simple docstring"""
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def __a ( self ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : str = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
lowercase__ : int = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
lowercase__ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test batched
lowercase__ : Optional[int] = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
lowercase__ : Union[str, Any] = feat_extract(lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : Any = ["longest", "max_length", "do_not_pad"]
lowercase__ : List[Any] = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase , lowerCamelCase ):
lowercase__ : Optional[int] = feat_extract(lowerCamelCase , padding=lowerCamelCase , max_length=lowerCamelCase , return_tensors="np" )
lowercase__ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : Dict = range(800 , 1400 , 200 )
lowercase__ : List[str] = [floats_list((1, x) )[0] for x in lengths]
lowercase__ : Tuple = ["longest", "max_length", "do_not_pad"]
lowercase__ : str = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase , lowerCamelCase ):
lowercase__ : List[str] = feat_extract(lowerCamelCase , max_length=lowerCamelCase , padding=lowerCamelCase )
lowercase__ : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : Tuple = feat_extract(
lowerCamelCase , truncation=lowerCamelCase , max_length=1000 , padding="max_length" , return_tensors="np" )
lowercase__ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : Tuple = feat_extract(
lowerCamelCase , truncation=lowerCamelCase , max_length=1000 , padding="longest" , return_tensors="np" )
lowercase__ : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
lowercase__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : Union[str, Any] = feat_extract(
lowerCamelCase , truncation=lowerCamelCase , max_length=2000 , padding="longest" , return_tensors="np" )
lowercase__ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ : Tuple = np.random.rand(100 ).astype(np.floataa )
lowercase__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowercase__ : Tuple = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowercase__ : Dict = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __a ( self ) -> str:
"""simple docstring"""
lowercase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase__ : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__ : List[str] = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test feature size
lowercase__ : str = feature_extractor(audio_target=lowerCamelCase , padding=lowerCamelCase , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowercase__ : Union[str, Any] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
lowercase__ : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test batched
lowercase__ : Dict = feature_extractor(lowerCamelCase , return_tensors="np" ).input_values
lowercase__ : List[str] = feature_extractor(lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowercase__ : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase__ : Optional[Any] = np.asarray(lowerCamelCase )
lowercase__ : List[Any] = feature_extractor(lowerCamelCase , return_tensors="np" ).input_values
lowercase__ : List[str] = feature_extractor(lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def __a ( self ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.feat_extract_tester.prepare_inputs_for_target()
lowercase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ : Dict = feat_extract.model_input_names[0]
lowercase__ : int = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCamelCase ) == len(lowerCamelCase ) for x, y in zip(lowerCamelCase , processed_features[input_name] ) ) )
lowercase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase )
lowercase__ : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
lowercase__ : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase__ : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Tuple:
"""simple docstring"""
lowercase__ : Dict = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase )
lowercase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ : Optional[Any] = feat_extract.model_input_names[0]
lowercase__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
lowercase__ : List[str] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase__ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Tuple:
"""simple docstring"""
lowercase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
lowercase__ : Optional[Any] = feat_extract.model_input_names[0]
lowercase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
lowercase__ : Optional[int] = feat_extract.num_mel_bins # hack!
lowercase__ : Optional[int] = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
lowercase__ : Optional[int] = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __a ( self ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.feat_extract_dict
lowercase__ : int = True
lowercase__ : Optional[Any] = self.feature_extraction_class(**lowerCamelCase )
lowercase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
lowercase__ : Union[str, Any] = [len(lowerCamelCase ) for x in speech_inputs]
lowercase__ : Any = feat_extract.model_input_names[0]
lowercase__ : Optional[int] = BatchFeature({input_name: speech_inputs} )
lowercase__ : int = feat_extract.num_mel_bins # hack!
lowercase__ : int = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCamelCase )
def __a ( self ) -> Dict:
"""simple docstring"""
lowercase__ : List[Any] = self.feat_extract_dict
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = self.feature_extraction_class(**lowerCamelCase )
lowercase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
lowercase__ : List[str] = [len(lowerCamelCase ) for x in speech_inputs]
lowercase__ : Any = feat_extract.model_input_names[0]
lowercase__ : Dict = BatchFeature({input_name: speech_inputs} )
lowercase__ : int = min(lowerCamelCase )
lowercase__ : List[str] = feat_extract.num_mel_bins # hack!
lowercase__ : Dict = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="np" )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples( self , num_samples ) -> List[Any]:
        """simple docstring"""
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def __a ( self ) -> List[str]:
"""simple docstring"""
        # fmt: off
        lowercase__ : List[str] = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
lowercase__ : List[Any] = self._load_datasamples(1 )
lowercase__ : int = SpeechTaFeatureExtractor()
lowercase__ : Tuple = feature_extractor(lowerCamelCase , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , lowerCamelCase , atol=1E-6 ) )
def __a ( self ) -> int:
"""simple docstring"""
        # fmt: off
        lowercase__ : Optional[int] = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
lowercase__ : Any = self._load_datasamples(1 )
lowercase__ : List[Any] = SpeechTaFeatureExtractor()
lowercase__ : int = feature_extractor(audio_target=lowerCamelCase , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
| 397 | 0 |
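The _check_zero_mean_unit_variance assertions above verify the extractor's per-utterance normalization. A minimal sketch of that transform (standard zero-mean/unit-variance scaling; the library's exact epsilon may differ):

import numpy as np

def zero_mean_unit_var(values: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    # Scale a waveform so its mean is ~0 and its variance ~1.
    return (values - values.mean()) / np.sqrt(values.var() + eps)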
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__( self , value=None ):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None
    def __repr__( self ):
        from pprint import pformat
        if self.left is None and self.right is None:
            return str(self.value )
        return pformat({f'{self.value}': (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    def __init__( self , root=None ):
        self.root = root
    def __str__( self ):
        return str(self.root )
    def __reassign_nodes( self , node , new_children ):
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right( self , node ):
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty( self ):
        return self.root is None
    def __insert( self , value ):
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert( self , *values ):
        for value in values:
            self.__insert(value )
    def search( self , value ):
        if self.empty():
            raise IndexError('Warning: Tree is empty! please use another.' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max( self , node=None ):
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min( self , node=None ):
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove( self , value ):
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse( self , node ):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree( self , traversal_function=None ):
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder( self , arr , node ):
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def find_kth_smallest( self , k , node ):
        arr = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder( curr_node ) -> Any:
    """simple docstring"""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def binary_search_tree() -> None:
    """simple docstring"""
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print('The value 6 exists' )
    else:
        print('The value 6 doesn\'t exist' )
    if t.search(-1 ) is not None:
        print('The value -1 exists' )
    else:
        print('The value -1 doesn\'t exist' )
    if not t.empty():
        print('Max Value: ' , t.get_max().value )  # type: ignore
        print('Min Value: ' , t.get_min().value )  # type: ignore
    for i in testlist:
        t.remove(i )
        print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 720 |
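A short illustration of the restored tree API above, including the in-order-based find_kth_smallest:

tree = BinarySearchTree()
tree.insert(8, 3, 10, 1, 6)
print([node.value for node in tree.traversal_tree()])  # preorder: [8, 3, 1, 6, 10]
print(tree.find_kth_smallest(2, tree.root))            # 2nd smallest value: 3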
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
| 685 | 0 |
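For context, a toy re-derivation of the expected tokens for "lower" (['lo', 'w', 'er</w>']) from the setUp vocab and merges above; real BPE repeatedly merges the lowest-ranked adjacent pair, but applying the three rules in order gives the same result on this tiny example:

def apply_merges(symbols, merges):
    # Apply each merge rule left-to-right over the symbol sequence.
    for a, b in merges:
        i, out = 0, []
        while i < len(symbols):
            if i < len(symbols) - 1 and symbols[i] == a and symbols[i + 1] == b:
                out.append(a + b)
                i += 2
            else:
                out.append(symbols[i])
                i += 1
        symbols = out
    return symbols

word = ["l", "o", "w", "e", "r</w>"]  # "lower" with the end-of-word marker
merges = [("l", "o"), ("lo", "w</w>"), ("e", "r</w>")]
assert apply_merges(word, merges) == ["lo", "w", "er</w>"]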