"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> List[Any]:
return TrainCommand(SCREAMING_SNAKE_CASE__ )
class snake_case_ ( a_ ):
@staticmethod
def snake_case_ ( a_ ):
a_ : Optional[Any] = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=a_ , required=a_ , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=a_ , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=a_ , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=a_ , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=a_ , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=a_ , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=a_ , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=a_ , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=a_ , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=a_ , default=3_2 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=a_ , default=6_4 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=a_ , default=3e-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=a_ , default=1e-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=a_ )
def __init__( self , a_ ):
a_ : List[str] = logging.get_logger("transformers-cli/training" )
a_ : Dict = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=a_ )
a_ : Tuple = args.output
a_ : List[str] = args.column_label
a_ : Tuple = args.column_text
a_ : Any = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
a_ : Tuple = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
a_ : Any = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
a_ : Tuple = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
a_ : int = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
a_ : Optional[int] = args.validation_split
a_ : int = args.train_batch_size
a_ : List[str] = args.valid_batch_size
a_ : Dict = args.learning_rate
a_ : Any = args.adam_epsilon
def snake_case_ ( self ):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def snake_case_ ( self ):
raise NotImplementedError
def snake_case_ ( self ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 237 |
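# Usage sketch (added; assumes the legacy `transformers-cli` entry point that
# registers this subcommand is installed):
#   transformers-cli train --train_data train.csv --model bert-base-uncased --output ./out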
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class snake_case_ ( a_ ):
__lowerCAmelCase = "efficientnet"
def __init__( self , a_ = 3 , a_ = 6_0_0 , a_ = 2.0 , a_ = 3.1 , a_ = 8 , a_ = [3, 3, 5, 3, 5, 5, 3] , a_ = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , a_ = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , a_ = [] , a_ = [1, 2, 2, 2, 1, 2, 1] , a_ = [1, 2, 2, 3, 3, 4, 1] , a_ = [1, 6, 6, 6, 6, 6, 6] , a_ = 0.25 , a_ = "swish" , a_ = 2_5_6_0 , a_ = "mean" , a_ = 0.02 , a_ = 0.001 , a_ = 0.99 , a_ = 0.5 , a_ = 0.2 , **a_ , ):
super().__init__(**a_ )
a_ : Tuple = num_channels
a_ : Any = image_size
a_ : Dict = width_coefficient
a_ : int = depth_coefficient
a_ : Optional[Any] = depth_divisor
a_ : Optional[int] = kernel_sizes
a_ : Dict = in_channels
a_ : Dict = out_channels
a_ : Union[str, Any] = depthwise_padding
a_ : Any = strides
a_ : Optional[int] = num_block_repeats
a_ : Tuple = expand_ratios
a_ : str = squeeze_expansion_ratio
a_ : Optional[int] = hidden_act
a_ : List[str] = hidden_dim
a_ : int = pooling_type
a_ : Optional[int] = initializer_range
a_ : List[Any] = batch_norm_eps
a_ : Tuple = batch_norm_momentum
a_ : List[Any] = dropout_rate
a_ : Dict = drop_connect_rate
a_ : str = sum(a_ ) * 4
class snake_case_ ( a_ ):
__lowerCAmelCase = version.parse("1.11" )
@property
def snake_case_ ( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case_ ( self ):
return 1e-5 | 237 | 1 |
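# Minimal usage sketch (added): the config can be built entirely offline.
# config = EfficientNetConfig(image_size=300, dropout_rate=0.3)
# config.num_hidden_layers == sum(config.num_block_repeats) * 4  # derived above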
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        Attributes:
            m_num_of_nodes - the number of nodes in the graph
            m_edges - the list of edges
            m_component - the dictionary which stores the index of the
            component a node belongs to
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root of the component that `u_node` belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates the root component index to every node."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Compares two components by size and attaches the smaller one to the
        larger one to form a single component."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        # Initialize additional lists required by the algorithm.
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # For every component, find its cheapest outgoing edge.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add each component's cheapest edge to the MST and merge components.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
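if __name__ == "__main__":
    # Added demonstration (not in the original file): the classic 4-node
    # example graph; the reported MST weight is 19.
    g = Graph(4)
    for u_node, v_node, weight in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]:
        g.add_edge(u_node, v_node, weight)
    g.boruvka()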
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
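# Usage sketch (added): `attribute_map` lets `hidden_size` transparently alias
# `d_model`, so `Speech2Text2Config(d_model=128).hidden_size == 128`.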
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """
    Exponential Linear Unit (ELU) activation: identity for positive inputs,
    alpha * (exp(x) - 1) for non-positive inputs. (Function name restored
    descriptively; the original identifier was machine-mangled.)
    """
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
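# Example behaviour (added): exponential_linear_unit(np.array([2.0, 0.0, -2.0]), alpha=0.3)
# returns array([ 2.        ,  0.        , -0.25939942]); negative inputs
# saturate toward -alpha while positive inputs pass through unchanged.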
# Min heap data structure with decrease-key functionality, in O(log(n)) time.


class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate a Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    # Flatten every "<name>.<digits>" pair (PyTorch ModuleList indexing) into a
    # single underscore-joined Flax module name.
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
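# Example (added): rename_key("down_blocks.0.attentions.1.weight")
# -> "down_blocks_0.attentions_1.weight"; each "<name>.<digits>" segment is
# collapsed into one Flax-style module name.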
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameter names to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weights so that a warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
import argparse
import json

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    VideoMAEConfig,
    VideoMAEForPreTraining,
    VideoMAEForVideoClassification,
    VideoMAEImageProcessor,
)


def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
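# Example mapping (added): rename_key("blocks.0.attn.proj.weight")
# -> "videomae.encoder.layer.0.attention.output.dense.weight"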
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            # Split the fused qkv projection into separate query/key/value
            # weights (target key names follow the HF VideoMAE layout).
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
import itertools
import math
def lowerCamelCase ( _UpperCamelCase : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCamelCase ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Tuple = 2
while True:
if is_prime(_UpperCamelCase ):
yield num
num += 1
def lowerCamelCase ( _UpperCamelCase : int = 1_0_0_0_1 ) -> int:
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , _UpperCamelCase ) )
if __name__ == "__main__":
print(F"{solution() = }")
from __future__ import annotations


def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Returns the cheapest path cost from the top-left to the bottom-right of
    `matrix`, moving only right or down. (Function name restored descriptively;
    the original identifier was machine-mangled.)
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for the current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
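# Worked example (added): for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest
# right/down path is 1 -> 3 -> 1 -> 1 -> 1, so min_path_sum(...) returns 7.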
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]


if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Project Euler problem 493 (https://projecteuler.net/problem=493)."""
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picks: int = 20) -> str:
    """Expected number of distinct colours among `num_picks` balls drawn from an
    urn of NUM_BALLS balls, BALLS_PER_COLOUR of each colour."""
    total = math.comb(NUM_BALLS, num_picks)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picks)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
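# Reasoning note (added): by linearity of expectation,
# E[#colours] = NUM_COLOURS * P(a given colour appears), and
# P(appears) = 1 - C(60, 20) / C(70, 20), which `solution` computes directly.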
# NOTE (restored): each of the eight precomputed timestep tables below had its
# name mangled to the same identifier in this dump, which would make every list
# overwrite the previous one. Distinct placeholder names are used here; the
# original identifiers are not recoverable from the dump.
PRECOMPUTED_TIMESTEPS_1 = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
PRECOMPUTED_TIMESTEPS_2 = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
PRECOMPUTED_TIMESTEPS_3 = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
PRECOMPUTED_TIMESTEPS_4 = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
PRECOMPUTED_TIMESTEPS_5 = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
PRECOMPUTED_TIMESTEPS_6 = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
PRECOMPUTED_TIMESTEPS_7 = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
PRECOMPUTED_TIMESTEPS_8 = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 520 | 0 |
import unittest

from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MobileBertForMaskedLM,
        MobileBertForMultipleChoice,
        MobileBertForNextSentencePrediction,
        MobileBertForPreTraining,
        MobileBertForQuestionAnswering,
        MobileBertForSequenceClassification,
        MobileBertForTokenClassification,
        MobileBertModel,
    )


class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """Raise if the installed Transformers version is older than `min_version`.

    `version` (from `packaging`) and the package `__version__` are imported earlier in this module."""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = F'This example requires a minimum version of {min_version},'
        error_message += F' but the version found is {__version__}.\n'
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers.") | 94 | 0 |
'''Geodesic distance on the WGS84 ellipsoid via the haversine formula.'''
from math import asin, atan, cos, radians, sin, sqrt, tan

# Equatorial radius, polar radius and mean radius in metres (WGS84).
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two (lat, lon) points,
    using latitudes corrected for the flattening of the ellipsoid."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
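# Quick usage sketch for `haversine_distance` above (the coordinates are
# illustrative): approximate distance from San Francisco to New York.
SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)
print(f"{haversine_distance(*SAN_FRANCISCO, *NEW_YORK) / 1000:.0f} km")  # roughly 4129 km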
| 489 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def _a ( self ):
lowerCamelCase__ =self.get_dummy_components()
lowerCamelCase__ =StableDiffusionLDMaDPipeline(**_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe.to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCamelCase__ =self.get_dummy_inputs(_lowerCamelCase )
lowerCamelCase__ =3 * [inputs["prompt"]]
# forward
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =rgb_slice_a[0, -3:, -3:, -1]
lowerCamelCase__ =depth_slice_a[0, -3:, -1]
lowerCamelCase__ =self.get_dummy_inputs(_lowerCamelCase )
lowerCamelCase__ =3 * [inputs.pop("prompt" )]
lowerCamelCase__ =ldmad_pipe.tokenizer(
_lowerCamelCase , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_lowerCamelCase , return_tensors="pt" , )
lowerCamelCase__ =text_inputs["input_ids"].to(_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe.text_encoder(_lowerCamelCase )[0]
lowerCamelCase__ =prompt_embeds
# forward
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =rgb_slice_a[0, -3:, -3:, -1]
lowerCamelCase__ =depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def _a ( self ):
lowerCamelCase__ ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ =self.get_dummy_components()
lowerCamelCase__ =PNDMScheduler(skip_prk_steps=_lowerCamelCase )
lowerCamelCase__ =StableDiffusionLDMaDPipeline(**_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe.to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCamelCase__ =self.get_dummy_inputs(_lowerCamelCase )
lowerCamelCase__ ="french fries"
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase , negative_prompt=_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =rgb[0, -3:, -3:, -1]
lowerCamelCase__ =depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCamelCase__ =np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
lowerCamelCase__ =np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _a ( self ):
lowerCamelCase__ =StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
lowerCamelCase__ =ldmad_pipe.to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCamelCase__ =self.get_inputs(_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =rgb[0, -3:, -3:, -1].flatten()
lowerCamelCase__ =rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
lowerCamelCase__ =np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
lowerCamelCase__ =np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _a ( self ):
lowerCamelCase__ =StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCamelCase__ =self.get_inputs(_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =0.4_9_5_5_8_6
lowerCamelCase__ =0.3_3_7_9_5_5_1_5
lowerCamelCase__ =1_1_2.4_8_5_1_8
lowerCamelCase__ =9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def _a ( self ):
lowerCamelCase__ =StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(_lowerCamelCase )
ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCamelCase__ =self.get_inputs(_lowerCamelCase )
lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth
lowerCamelCase__ =0.4_1_9_4_1_2_7
lowerCamelCase__ =0.3_5_3_7_5_5_8_6
lowerCamelCase__ =0.5_6_3_8_5_0_2
lowerCamelCase__ =0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 530 | 0 |
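# Hedged usage sketch for the pipeline exercised by the tests above. Running
# it downloads the "Intel/ldm3d" checkpoint; the prompt and step count are
# illustrative, not taken from the tests.
import torch
from diffusers import StableDiffusionLDMaDPipeline

pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
out = pipe("a photograph of an astronaut riding a horse", num_inference_steps=25, output_type="numpy")
rgb, depth = out.rgb, out.depth
print(rgb.shape, depth.shape)  # e.g. (1, 512, 512, 3) and (1, 512, 512)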
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = 'codegen'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self, vocab_size=50400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , 'pad_token_id' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 574 |
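# Standalone sketch of the `past_key_values` dummy tensors built by
# `generate_dummy_inputs` above. The dimensions are hypothetical small values,
# not real CodeGen sizes.
import torch

batch, seqlen = 2, 8
num_heads, num_layers, hidden_size = 4, 3, 64
past_length = seqlen + 2  # the config above deliberately uses a different length than seqlen

past_shape = (batch, num_heads, past_length, hidden_size // num_heads)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_layers)]

# The attention mask must cover both past and current tokens.
attention_mask = torch.cat([torch.ones(batch, seqlen), torch.ones(batch, past_length)], dim=1)
print(len(past_key_values), past_key_values[0][0].shape, attention_mask.shape)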
'''simple docstring'''
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open('w').write('\n'.join(new))


if __name__ == "__main__":
    fire.Fire(minify)
| 574 | 1 |
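# Usage sketch for the `minify` script above, with throwaway temporary
# directories and hypothetical file contents; assumes minify() is in scope.
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as src, tempfile.TemporaryDirectory() as dst:
    Path(src, "a.txt").write_text("line1\nline2\nline3\n")
    minify(src, f"{dst}/mini", n=2)
    print(Path(dst, "mini", "a.txt").read_text())  # -> "line1\nline2"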
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to SortishSamler or not.'} )
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    adafactor: bool = field(default=False , metadata={'help': 'whether to use adafactor'} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    dropout: Optional[float] = field(default=None , metadata={'help': 'Dropout probability. Goes into model.config.'} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    lr_scheduler: Optional[str] = field(
        default='linear' , metadata={'help': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
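# Hedged sketch: these arguments are normally parsed from the command line with
# HfArgumentParser. `--output_dir` is required by the TrainingArguments base
# class; the flag values below are illustrative.
from transformers import HfArgumentParser

parser = HfArgumentParser(Seq2SeqTrainingArguments)
(training_args,) = parser.parse_args_into_dataclasses(
    args=["--output_dir", "outputs", "--sortish_sampler", "--label_smoothing", "0.1"]
)
print(training_args.sortish_sampler, training_args.label_smoothing)  # True 0.1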
| 277 | """simple docstring"""
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 277 | 1 |
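# Quick sanity check for `solution` above: F(12) = 144 is the first Fibonacci
# number with three digits, so solution(3) should return 12.
print(solution(3))  # -> 12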
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR, backed by a json vocabulary file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab )

    def get_vocab(self):
        return dict(self.vocab , **self.added_tokens_encoder )

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )

    def _convert_id_to_token(self, index):
        return self.decoder.get(index )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        return (vocab_file,)
| 713 |
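# Standalone sketch of the character-level scheme the tokenizer above
# implements, using a toy vocabulary (the real one ships with the checkpoint).
toy_vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}
toy_decoder = {v: k for k, v in toy_vocab.items()}

text = "abc"
ids = [toy_vocab.get(ch, toy_vocab["[GO]"]) for ch in text]  # unknown chars map to [GO]
print(ids)                                   # [2, 3, 4]
print("".join(toy_decoder[i] for i in ids))  # "abc"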
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/nllb-200-distilled-600M': (
            'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, legacy_behaviour=False, **kwargs, ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
a__ : List[Any] = self.__dict__.copy()
a__ : Optional[Any] = None
a__ : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int , A__ : int ) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a__ : str = {}
a__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __lowerCAmelCase ( self : str ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowerCAmelCase ( self : List[str] , A__ : str ) -> None:
'''simple docstring'''
a__ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCAmelCase ( self : Dict , A__ : List[int] , A__ : Optional[List[int]] = None , A__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
a__ : List[str] = [1] * len(self.prefix_tokens )
a__ : str = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A__ )) + suffix_ones
return prefix_ones + ([0] * len(A__ )) + ([0] * len(A__ )) + suffix_ones
def __lowerCAmelCase ( self : Any , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCAmelCase ( self : int , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
a__ : Optional[int] = [self.sep_token_id]
a__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self : Dict , A__ : Optional[int] , A__ : str , A__ : Optional[str] , A__ : Optional[str] , **A__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
a__ : List[Any] = src_lang
a__ : List[Any] = self(A__ , add_special_tokens=A__ , return_tensors=A__ , **A__ )
a__ : Union[str, Any] = self.convert_tokens_to_ids(A__ )
a__ : Union[str, Any] = tgt_lang_id
return inputs
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ : List[str] = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Union[str, Any] , A__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(A__ , out_type=A__ )
def __lowerCAmelCase ( self : Any , A__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a__ : Tuple = self.sp_model.PieceToId(A__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self : List[Any] , A__ : int ) -> Union[str, Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
def __lowerCAmelCase ( self : Optional[Any] , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ : Tuple = os.path.join(
A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A__ )
elif not os.path.isfile(self.vocab_file ):
with open(A__ , '''wb''' ) as fi:
a__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (out_vocab_file,)
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
| 340 | 0 |
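# Hedged usage sketch for the NLLB tokenizer above (downloads the checkpoint;
# the sentence is illustrative). src_lang/tgt_lang control the language codes
# placed around the sequence, as implemented in set_src_lang_special_tokens.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
print(batch["input_ids"].shape)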
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 45 | import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config
def snake_case__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def snake_case__ ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def snake_case__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def snake_case__ ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowerCAmelCase_ )
def snake_case__ ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def snake_case__ ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowerCAmelCase_ , prev_timestep=lowerCAmelCase_ )
def snake_case__ ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log" )
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0E-1_0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def snake_case__ ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowerCAmelCase_ ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=lowerCAmelCase_ ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=lowerCAmelCase_ ) - -0.0_01_00_11 < 1E-5
def snake_case__ ( self ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowerCAmelCase_ )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase_ ):
# 1. predict noise residual
__lowercase = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(lowerCAmelCase_ ) )
__lowercase = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def snake_case__ ( self ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowerCAmelCase_ )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase_ ):
# 1. predict noise residual
__lowercase = model(lowerCAmelCase_ , lowerCAmelCase_ )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , prev_timestep=lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(lowerCAmelCase_ ) )
__lowercase = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
| 321 | 0 |
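# Simplified, standalone sketch of the DDPM-style posterior variance that the
# `_get_variance` tests above probe; the real UnCLIPScheduler additionally
# clamps it in log space for "fixed_small_log". Beta schedule values below are
# illustrative.
import torch

betas = torch.linspace(1e-4, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def posterior_variance(t: int) -> torch.Tensor:
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    beta_t = 1 - alpha_prod_t / alpha_prod_prev
    return (1 - alpha_prod_prev) / (1 - alpha_prod_t) * beta_t

print(posterior_variance(1).item(), posterior_variance(999).item())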
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = ViTMSNModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = self.type_sequence_label_size
snake_case__ : Dict = ViTMSNForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ : str = 1
snake_case__ : List[str] = ViTMSNForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : int = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
    def setUp(self):
        self.model_tester = ViTMSNModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMSNConfig , has_text_modality=False , hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = ViTMSNModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
snake_case__ : int = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.default_image_processor
snake_case__ : str = prepare_img()
snake_case__ : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 713 |
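# Hedged inference sketch for the model integration-tested above (downloads
# weights; the image path is a hypothetical local file).
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")

image = Image.open("cats.png")  # hypothetical file
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])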
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'audio': Audio()} )
    label_schema: ClassVar[Features] = Features({'transcription': Value('string' )} )
    audio_column: str = 'audio'
    transcription_column: str = 'transcription'

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features." )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(f"Column {self.audio_column} is not an Audio type." )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema['audio'] = features[self.audio_column]
        task_template.__dict__['input_schema'] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: 'audio', self.transcription_column: 'transcription'}
| 419 | 0 |
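# Usage sketch for the task template above: aligning it with a dataset's
# features. The column names match the template defaults; the sampling rate is
# illustrative.
from datasets import Audio, Features, Value

features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
task = AutomaticSpeechRecognition()  # the dataclass defined above
aligned = task.align_with_features(features)
print(aligned.column_mapping)  # {'audio': 'audio', 'transcription': 'transcription'}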
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''')) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''')
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
])
class lowerCAmelCase ( unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='utf-8' , check=True , )
        assert hasattr(self , 'env' )
    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999999 )
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , outfile )
| 24 |
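# Hedged sketch of launching a comparable SageMaker job directly; the role
# ARN, framework versions and hyperparameters are hypothetical placeholders,
# and fit() needs AWS credentials, so it is left commented out.
from sagemaker.huggingface import HuggingFace

estimator = HuggingFace(
    entry_point="run_glue.py",
    source_dir="./examples/pytorch/text-classification",
    role="arn:aws:iam::111122223333:role/sagemaker-execution",  # hypothetical
    instance_count=1,
    instance_type="ml.g4dn.xlarge",
    transformers_version="4.26",
    pytorch_version="1.13",
    py_version="py39",
    hyperparameters={"model_name_or_path": "distilbert-base-cased", "output_dir": "/opt/ml/model"},
)
# estimator.fit()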
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__A = "CompVis/stable-diffusion-v1-1"
__A = "CompVis/stable-diffusion-v1-2"
__A = "CompVis/stable-diffusion-v1-3"
__A = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    r"""Pipeline that runs the same prompt through Stable Diffusion checkpoints v1.1-v1.4 for comparison."""

    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker: bool = True):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Check that the height and width are divisible by 8
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        common_kwargs = dict(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get the first result from each Stable Diffusion checkpoint (v1.1 through v1.4)
        res1 = self.text2img_sd1_1(**common_kwargs)
        res2 = self.text2img_sd1_2(**common_kwargs)
        res3 = self.text2img_sd1_3(**common_kwargs)
        res4 = self.text2img_sd1_4(**common_kwargs)
        # Collect all result images into a single list and return via StableDiffusionPipelineOutput
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
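# Minimal usage sketch (illustrative; assumes the four checkpoints are downloadable and
# that this file is loaded as a community pipeline via `custom_pipeline`, which is one
# common way to use files like this one):
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   ).to("cuda")
#   images = pipe(prompt="an astronaut riding a horse on mars").images  # one image per checkpoint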
| 325 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of func (an expression in x, given as a string) starting from x = a."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find a root of a trigonometric function: the value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find a root of a polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find the value of e (root of log(x) - 1)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential root
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
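# Worked example (illustrative): for f(x) = sin(x) with x0 = 2, the update
# x_{n+1} = x_n - f(x_n) / f'(x_n) produces roughly 2 -> 4.185 -> 2.477 -> 3.262 -> 3.1410 -> pi,
# converging quadratically once the iterate is close to the root.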
| 40 |
'''simple docstring'''
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
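# num.txt is expected to hold one integer per line (in Project Euler problem 13 it is
# one hundred 50-digit numbers); the answer is the first ten digits of their sum.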
if __name__ == "__main__":
print(solution())
| 40 | 1 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Map an LDM VAE state dict onto the diffusers AutoencoderKL layout."""
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only SD v1 configs are supported
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
a_ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
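# Example invocation (script name and paths are placeholders):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae_diffusers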
| 685 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True if number is prime, else False."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (the default 10001 matches Project Euler problem 7)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
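# Why stepping by 6 works in is_prime: every integer is 6k + r with r in {0,...,5};
# 6k, 6k + 2 and 6k + 4 are even and 6k + 3 is divisible by 3, so any prime > 3 must be
# of the form 6k - 1 or 6k + 1, which is exactly the pair (i, i + 2) checked in the loop.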
| 685 | 1 |
'''simple docstring'''
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"  # pip name differs from the import name
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
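# Note: when an optional backend is missing, diffusers exposes dummy placeholder classes
# whose `_backends` attribute lists what must be installed; the test above checks that each
# such backend has a pinned entry in the dependency table (modulo pip-name spelling).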
| 508 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """Fast MBart tokenizer (backed by HuggingFace tokenizers), with source/target language-code handling."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-lang setting: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target-lang setting: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
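# Minimal usage sketch (assumes Hub access; the model id is taken from the map above):
#   tokenizer = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")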
| 508 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>", "</d>", "<s>", "</s>", "</_>", "<unk>", "<pad>", "</n>",
            "我", "是", "C", "P", "M", "A", "n", "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
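# CpmAntTokenizer segments Chinese text with jieba before vocabulary lookup, which is why
# the suite is gated on @require_jieba and why "今天天气真好!" splits into word-level
# pieces ("今天", "天气", ...) rather than single characters.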
| 169 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
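# Example invocation (all paths are placeholders):
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./mluke/pytorch_model.bin --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl --pytorch_dump_folder_path ./mluke-base \
#       --model_size base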
| 169 | 1 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
"Pregnancy": 16_86_29,
"Christianity": 76_75,
"Explain": 10_64_23,
"Fitness": 6_34_40,
"Saving": 6_31_63,
"Ask": 2_71_71,
"Ass": 9_59_85,
"Joke": 16_35_09,
"Questions": 4_56_22,
"Thoughts": 4_96_05,
"Retail": 5_23_42,
"Feminism": 16_43_38,
"Writing": 1_19_92,
"Atheism": 19_22_63,
"Netflix": 4_86_16,
"Computing": 3_96_39,
"Opinion": 4_32_13,
"Alone": 4_49_67,
"Funny": 5_89_17,
"Gaming": 4_03_58,
"Human": 40_88,
"India": 13_31,
"Joker": 7_71_38,
"Diet": 3_62_06,
"Legal": 1_18_59,
"Norman": 49_39,
"Tip": 7_26_89,
"Weight": 5_23_43,
"Movies": 4_62_73,
"Running": 2_34_25,
"Science": 20_90,
"Horror": 3_77_93,
"Confession": 6_05_72,
"Finance": 1_22_50,
"Politics": 1_63_60,
"Scary": 19_19_85,
"Support": 1_26_54,
"Technologies": 3_25_16,
"Teenage": 6_61_60,
"Event": 3_27_69,
"Learned": 6_74_60,
"Notion": 18_27_70,
"Wikipedia": 3_75_83,
"Books": 66_65,
"Extract": 7_60_50,
"Confessions": 10_27_01,
"Conspiracy": 7_59_32,
"Links": 6_36_74,
"Narcissus": 15_04_25,
"Relationship": 5_47_66,
"Relationships": 13_47_96,
"Reviews": 4_16_71,
"News": 42_56,
"Translation": 2_68_20,
"multilingual": 12_84_06,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
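# For example, get_pairs(("l", "o", "w</w>")) returns {("l", "o"), ("o", "w</w>")}.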
class CTRLTokenizer(PreTrainedTokenizer):
    """Construct a CTRL tokenizer based on Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # repeatedly merge the highest-ranked pair until no known merges remain
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
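# Minimal usage sketch (assumes the "ctrl" vocab/merges files above are reachable):
#   tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#   ids = tokenizer("Links Hello world").input_ids  # "Links" is one of the control codes above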
| 711 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Bit does not use input_ids/inputs_embeds/attention_mask, so several common tests are overridden or skipped."""

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also works using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.6526, -0.5263, -1.4398]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
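# These suites are typically run with pytest, e.g.:
#   python -m pytest tests/models/bit/test_modeling_bit.py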
| 390 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class a_ ( __lowerCAmelCase , unittest.TestCase ):
a : Optional[Any] = MarianTokenizer
a : Union[str, Any] = False
a : int = True
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
super().setUp()
_UpperCAmelCase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
_UpperCAmelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
_UpperCAmelCase = Path(self.tmpdirname )
save_json(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowerCamelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
_UpperCAmelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : Optional[Any] , **__UpperCamelCase : Tuple ) ->List[str]:
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Dict ) ->List[Any]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
_UpperCAmelCase = """</s>"""
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def _snake_case ( self : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowerCamelCase_ ) , 9 )
def _snake_case ( self : Optional[Any] ) ->Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def _snake_case ( self : int ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
_UpperCAmelCase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
_UpperCAmelCase = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(lowerCamelCase_ , batch.input_ids[0] )
_UpperCAmelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCamelCase_ )
_UpperCAmelCase = [x.name for x in Path(lowerCamelCase_ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowerCamelCase_ )
MarianTokenizer.from_pretrained(lowerCamelCase_ )
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = tok(
["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5_12) )
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def _snake_case ( self : List[Any] ) ->List[Any]:
        '''simple docstring'''
        # fmt: off
_UpperCAmelCase = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def _snake_case ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
_UpperCAmelCase = """Tämä on testi"""
_UpperCAmelCase = """This is a test"""
_UpperCAmelCase = [76, 7, 20_47, 2]
_UpperCAmelCase = [69, 12, 11, 9_40, 2]
_UpperCAmelCase = tokenizer(lowerCamelCase_ ).input_ids
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_UpperCAmelCase = tokenizer(text_target=lowerCamelCase_ ).input_ids
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_UpperCAmelCase = tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
        self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) | 555 | def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursive solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue
def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) solution."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Recursive helper for top_down_cut_rod; caches best revenues in max_rev."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) solution."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args(n: int, prices: list):
    """Validate that n is non-negative and that every piece length has a price."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)
def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
    main()
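# Worked example (editorial sketch, not part of the original module): with
# prices = [1, 5, 8, 9] the recurrence max_rev[i] = max(prices[j - 1] + max_rev[i - j])
# fills max_rev = [0, 1, 5, 8, 10], so a rod of length 4 is best cut into 2 + 2
# for a revenue of 5 + 5 = 10. All three implementations agree:
if __name__ == "__main__":
    assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10
    assert top_down_cut_rod(4, [1, 5, 8, 9]) == 10
    assert naive_cut_rod_recursive(4, [1, 5, 8, 9]) == 10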
| 537 | 0 |
"""simple docstring"""
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """Apply the mass-action law n * p = n_i**2 to recover whichever concentration is zero."""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
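# Usage sketch (editorial addition): by the mass-action law n * p = n_i**2,
# fixing any two carrier concentrations determines the third. With hypothetical
# values n = 25 and p = 100, the intrinsic concentration is sqrt(25 * 100) = 50:
if __name__ == "__main__":
    assert carrier_concentration(25, 100, 0) == ("intrinsic_conc", 50.0)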
| 197 | """simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    """Greedily pack words into lines of max_width and fully justify each line."""
    words = word.split()
    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_mod_remainder = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_mod_remainder):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)
    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
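# Worked example (editorial sketch): each full line distributes its leftover
# spaces round-robin from the left, and the final line is left-justified:
if __name__ == "__main__":
    assert text_justification("This is an example of text justification.", 16) == [
        "This    is    an",
        "example  of text",
        "justification.  ",
    ]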
| 197 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """Bundles a ViT image processor and a CLIP tokenizer into one CLIPSeg processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
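# Usage sketch (editorial addition; the checkpoint name is illustrative): pair
# the processor with a segmentation model, giving either text or visual prompts:
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
#
# Exactly one of `text` and `visual_prompt` may be supplied; either can be
# combined with `images`, whose pixel values are merged into the encoding.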
| 59 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes restricted to odd candidates above 2."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: the prime below `ceiling` writable as the longest sum of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
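# Sanity check (known Project Euler #50 result): below one million the longest
# run of consecutive primes summing to a prime is 543 terms long and totals
# 997651, so `solution()` should return 997651. Uncomment to verify (slow):
#
#     assert solution() == 997651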
| 350 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
def __init__( self: List[str] , __A: VQModel , __A: UNetaDModel , __A: DDIMScheduler ) -> Tuple:
super().__init__()
self.register_modules(vqvae=__A , unet=__A , scheduler=__A )
@torch.no_grad()
def __call__( self: Optional[Any] , __A: int = 1 , __A: Optional[Union[torch.Generator, List[torch.Generator]]] = None , __A: float = 0.0 , __A: int = 50 , __A: Optional[str] = "pil" , __A: bool = True , **__A: int , ) -> Union[Tuple, ImagePipelineOutput]:
_A = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__A , )
_A = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_A = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__A )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
_A = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_A = {}
if accepts_eta:
_A = eta
for t in self.progress_bar(self.scheduler.timesteps ):
_A = self.scheduler.scale_model_input(__A , __A )
# predict the noise residual
_A = self.unet(__A , __A ).sample
# compute the previous noisy sample x_t -> x_t-1
_A = self.scheduler.step(__A , __A , __A , **__A ).prev_sample
# decode the image latents with the VAE
_A = self.vqvae.decode(__A ).sample
_A = (image / 2 + 0.5).clamp(0 , 1 )
_A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A = self.numpy_to_pil(__A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__A )
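# Usage sketch (editorial addition; the class above follows diffusers'
# unconditional LDMPipeline, and the names below are illustrative):
#
#     pipe = LDMPipeline(vqvae=vqvae, unet=unet, scheduler=DDIMScheduler())
#     images = pipe(batch_size=1, num_inference_steps=50, output_type="pil").images
#
# Latents drawn with randn_tensor are denoised by the UNet under the DDIM
# schedule, then decoded back to images by the VQ-VAE.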
| 62 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = jnp.floataa
def __A ( self: Tuple ) -> Tuple:
_A = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __A: Dict ) -> Tuple:
_A ,_A ,_A ,_A = hidden_states.shape
_A = jax.image.resize(
__A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
_A = self.conv(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = jnp.floataa
def __A ( self: List[str] ) -> Tuple:
_A = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Union[str, Any] , __A: List[Any] ) -> Union[str, Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
_A = self.conv(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
A_ = 42
A_ = None
A_ = 0.0
A_ = None
A_ = jnp.floataa
def __A ( self: Dict ) -> Dict:
_A = self.in_channels if self.out_channels is None else self.out_channels
_A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_A = nn.Conv(
__A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_A = nn.Dense(__A , dtype=self.dtype )
_A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_A = nn.Dropout(self.dropout_prob )
_A = nn.Conv(
__A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_A = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
_A = None
if use_nin_shortcut:
_A = nn.Conv(
__A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__( self: Dict , __A: List[Any] , __A: List[Any] , __A: Any=True ) -> List[Any]:
_A = hidden_states
_A = self.norma(__A )
_A = nn.swish(__A )
_A = self.conva(__A )
_A = self.time_emb_proj(nn.swish(__A ) )
_A = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 )
_A = hidden_states + temb
_A = self.norma(__A )
_A = nn.swish(__A )
_A = self.dropout(__A , __A )
_A = self.conva(__A )
if self.conv_shortcut is not None:
_A = self.conv_shortcut(__A )
return hidden_states + residual
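# Shape sketch (editorial addition; assumes the NHWC layout these Flax modules
# use): the upsample block maps (B, H, W, C) -> (B, 2H, 2W, out_channels) via a
# nearest-neighbour resize plus a 3x3 conv, the downsample block halves H and W
# with a stride-2 conv, and the resnet block preserves H and W while adding a
# learned 1x1-conv shortcut whenever in_channels != out_channels.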
| 62 | 1 |
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int=False ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Any = OmegaConf.load(__UpperCAmelCase )
if display:
print(yaml.dump(OmegaConf.to_container(__UpperCAmelCase ) ) )
return config
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : Dict=None ) -> List[str]:
"""simple docstring"""
if conf_path is None:
UpperCAmelCase_ : List[str] = '''./model_checkpoints/vqgan_only.yaml'''
UpperCAmelCase_ : Dict = load_config(__UpperCAmelCase , display=__UpperCAmelCase )
UpperCAmelCase_ : str = VQModel(**config.model.params )
if ckpt_path is None:
UpperCAmelCase_ : Tuple = '''./model_checkpoints/vqgan_only.pt'''
UpperCAmelCase_ : int = torch.load(__UpperCAmelCase , map_location=__UpperCAmelCase )
if ".ckpt" in ckpt_path:
UpperCAmelCase_ : str = sd['''state_dict''']
model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
model.to(__UpperCAmelCase )
del sd
return model
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = model.encode(__UpperCAmelCase )
print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
UpperCAmelCase_ : Any = model.decode(__UpperCAmelCase )
return xrec
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any]=False ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = string.rsplit("." , 1 )
if reload:
UpperCAmelCase_ : Optional[Any] = importlib.import_module(__UpperCAmelCase )
importlib.reload(__UpperCAmelCase )
return getattr(importlib.import_module(__UpperCAmelCase , package=__UpperCAmelCase ) , cls )
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> Tuple:
"""simple docstring"""
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int]=True , _SCREAMING_SNAKE_CASE : Any=True ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = instantiate_from_config(__UpperCAmelCase )
if sd is not None:
model.load_state_dict(__UpperCAmelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
"""simple docstring"""
if ckpt:
UpperCAmelCase_ : Any = torch.load(__UpperCAmelCase , map_location="cpu" )
UpperCAmelCase_ : Optional[Any] = pl_sd['''global_step''']
print(F'''loaded model from global step {global_step}.''' )
else:
UpperCAmelCase_ : Union[str, Any] = {'''state_dict''': None}
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : str = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=__UpperCAmelCase , eval_mode=__UpperCAmelCase )['''model''']
return model, global_step
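# Usage sketch (editorial addition; assumes the helpers above carry their
# conventional names load_vqgan / reconstruct_with_vqgan, and reuses only the
# default paths shown above):
#
#     model = load_vqgan(device, conf_path="./model_checkpoints/vqgan_only.yaml",
#                        ckpt_path="./model_checkpoints/vqgan_only.pt")
#     xrec = reconstruct_with_vqgan(x, model)  # x: image batch tensor on `device`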
| 71 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCamelCase : str = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = test_results.split(''' ''' )
lowerCamelCase_ : Tuple = 0
lowerCamelCase_ : Optional[int] = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase_ : Optional[Any] = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(__UpperCAmelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Tuple = {}
lowerCamelCase_ : List[Any] = None
lowerCamelCase_ : Any = False
for line in failures_short_lines.split('''\n''' ):
if re.search(R'''_ \[doctest\]''' , __UpperCAmelCase ):
lowerCamelCase_ : Union[str, Any] = True
lowerCamelCase_ : Optional[Any] = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
lowerCamelCase_ : str = line
lowerCamelCase_ : List[Any] = False
return failures
class lowerCAmelCase__ :
def __init__( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : List[Any] = title
lowerCamelCase_ : Dict = doc_test_results['''time_spent'''].split(''',''' )[0]
lowerCamelCase_ : str = doc_test_results['''success''']
lowerCamelCase_ : int = doc_test_results['''failures''']
lowerCamelCase_ : List[Any] = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase_ : Union[str, Any] = doc_test_results
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ : Dict = [self._time_spent]
lowerCamelCase_ : Optional[int] = 0
for time in time_spent:
lowerCamelCase_ : Tuple = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(UpperCamelCase_ ) == 1:
lowerCamelCase_ : int = [0, 0, time_parts[0]]
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_600 + minutes * 60 + seconds
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
return F"""{int(UpperCamelCase_ )}h{int(UpperCamelCase_ )}m{int(UpperCamelCase_ )}s"""
@property
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : Any = 40
lowerCamelCase_ : Tuple = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(UpperCamelCase_ , UpperCamelCase_ )}
lowerCamelCase_ : int = ''''''
for category, failures in category_failures.items():
if len(UpperCamelCase_ ) == 0:
continue
if report != "":
report += "\n\n"
report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(UpperCamelCase_ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ : str = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(UpperCamelCase_ )
@staticmethod
def __UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Any = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(UpperCamelCase_ )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=UpperCamelCase_ , )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
lowerCamelCase_ : Optional[int] = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else '''All tests passed.'''
lowerCamelCase_ : Union[str, Any] = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=UpperCamelCase_ , )
def __UpperCamelCase ( self : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : str = ''''''
for key, value in failures.items():
lowerCamelCase_ : List[Any] = value[:200] + ''' [Truncated]''' if len(UpperCamelCase_ ) > 250 else value
failures_text += F"""*{key}*\n_{value}_\n\n"""
lowerCamelCase_ : List[str] = job_name
lowerCamelCase_ : List[str] = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
lowerCamelCase_ : List[str] = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def __UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
lowerCamelCase_ : Dict = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
lowerCamelCase_ : List[Any] = sorted(self.doc_test_results.items() , key=lambda UpperCamelCase_ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
lowerCamelCase_ : str = F"""*Num failures* :{len(job_result['failed'] )} \n"""
lowerCamelCase_ : Any = job_result['''failures''']
lowerCamelCase_ : Any = self.get_reply_blocks(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , text=UpperCamelCase_ )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F"""Results for {job}""" , blocks=UpperCamelCase_ , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1 )
def __snake_case ():
"""simple docstring"""
lowerCamelCase_ : Any = os.environ['''GITHUB_RUN_ID''']
lowerCamelCase_ : Dict = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
lowerCamelCase_ : Tuple = requests.get(__UpperCAmelCase ).json()
lowerCamelCase_ : Union[str, Any] = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
lowerCamelCase_ : Union[str, Any] = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(__UpperCAmelCase ):
lowerCamelCase_ : Optional[Any] = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , __UpperCAmelCase )
return {}
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : List[Any] = {}
if os.path.exists(__UpperCAmelCase ):
lowerCamelCase_ : Optional[Any] = os.listdir(__UpperCAmelCase )
for file in files:
try:
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , encoding='''utf-8''' ) as f:
lowerCamelCase_ : str = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(__UpperCAmelCase , __UpperCAmelCase )}.""" ) from e
return _artifact
def __snake_case ():
"""simple docstring"""
class lowerCAmelCase__ :
def __init__( self : Any , UpperCamelCase_ : str ) -> int:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = name
lowerCamelCase_ : str = []
def __str__( self : int ) -> Tuple:
"""simple docstring"""
return self.name
def __UpperCamelCase ( self : List[str] , UpperCamelCase_ : str ) -> List[str]:
"""simple docstring"""
self.paths.append({'''name''': self.name, '''path''': path} )
lowerCamelCase_ : Dict[str, Artifact] = {}
lowerCamelCase_ : List[str] = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase_ : List[Any] = directory
if artifact_name not in _available_artifacts:
lowerCamelCase_ : List[Any] = Artifact(__UpperCAmelCase )
_available_artifacts[artifact_name].add_path(__UpperCAmelCase )
return _available_artifacts
if __name__ == "__main__":
__lowerCamelCase : List[Any] = get_job_links()
__lowerCamelCase : str = retrieve_available_artifacts()
__lowerCamelCase : Tuple = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowerCamelCase : Optional[Any] = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowerCamelCase : str = github_actions_job_links.get("""run_doctests""")
__lowerCamelCase : Optional[int] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowerCamelCase : Tuple = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = handle_test_results(artifact["""stats"""])
__lowerCamelCase : Union[str, Any] = failed
__lowerCamelCase : str = success
__lowerCamelCase : int = time_spent[1:-1] + """, """
__lowerCamelCase : List[Any] = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowerCamelCase : List[Any] = line.replace("""FAILED """, """""")
__lowerCamelCase : List[str] = line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowerCamelCase , __lowerCamelCase : Optional[Any] = line.split("""::""")
else:
__lowerCamelCase , __lowerCamelCase : str = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowerCamelCase : Tuple = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowerCamelCase : List[Any] = all_failures[test] if test in all_failures else """N/A"""
__lowerCamelCase : Optional[int] = failure
break
__lowerCamelCase : Dict = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 501 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_lowerCAmelCase = logging.getLogger(__name__)
class __A ( a ):
"""simple docstring"""
def __init__( self , _lowerCamelCase=-1 )-> Union[str, Any]:
# in NER datasets, the last column is usually reserved for NER label
lowercase__ = label_idx
def snake_case_( self , _lowerCamelCase , _lowerCamelCase )-> List[InputExample]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowercase__ = mode.value
lowercase__ = os.path.join(_lowerCamelCase , f'''{mode}.txt''' )
lowercase__ = 1
lowercase__ = []
with open(_lowerCamelCase , encoding='''utf-8''' ) as f:
lowercase__ = []
lowercase__ = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
guid_index += 1
lowercase__ = []
lowercase__ = []
else:
lowercase__ = line.split(''' ''' )
words.append(splits[0] )
if len(_lowerCamelCase ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
return examples
def snake_case_( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )-> List[Any]:
lowercase__ = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(_lowerCamelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowercase__ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(_lowerCamelCase )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def snake_case_( self , _lowerCamelCase )-> List[str]:
if path:
with open(_lowerCamelCase , '''r''' ) as f:
lowercase__ = f.read().splitlines()
if "O" not in labels:
lowercase__ = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __A ( a ):
"""simple docstring"""
def __init__( self )-> List[str]:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def snake_case_( self , _lowerCamelCase )-> List[str]:
if path:
with open(_lowerCamelCase , '''r''' ) as f:
lowercase__ = f.read().splitlines()
if "O" not in labels:
lowercase__ = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __A ( a ):
"""simple docstring"""
def snake_case_( self , _lowerCamelCase , _lowerCamelCase )-> List[InputExample]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowercase__ = mode.value
lowercase__ = os.path.join(_lowerCamelCase , f'''{mode}.txt''' )
lowercase__ = 1
lowercase__ = []
with open(_lowerCamelCase , encoding='''utf-8''' ) as f:
for sentence in parse_incr(_lowerCamelCase ):
lowercase__ = []
lowercase__ = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
guid_index += 1
return examples
def snake_case_( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )-> Union[str, Any]:
lowercase__ = 0
for sentence in parse_incr(_lowerCamelCase ):
lowercase__ = preds_list[example_id]
lowercase__ = ''''''
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(_lowerCamelCase )
example_id += 1
def snake_case_( self , _lowerCamelCase )-> List[str]:
if path:
with open(_lowerCamelCase , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
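# Input format sketch (editorial addition): the CoNLL-style readers above expect
# one token per line with whitespace-separated columns and blank lines between
# sentences, e.g.
#
#     U.N. NNP I-NP I-ORG
#     official NN I-NP O
#
# NER takes its label from the last column (label_idx=-1), chunking from the
# second-to-last (label_idx=-2), and the POS task parses CoNLL-U via parse_incr.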
| 717 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __A ( a ):
"""simple docstring"""
A_ = ''
A_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
A_ = None # compression type in fsspec. ex: "gzip"
A_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , _lowerCamelCase = "" , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase )-> List[str]:
super().__init__(self , **_lowerCamelCase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowercase__ = fsspec.open(
_lowerCamelCase , mode='''rb''' , protocol=_lowerCamelCase , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowercase__ = os.path.basename(self.file.path.split('''::''' )[0] )
lowercase__ = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowercase__ = None
@classmethod
def snake_case_( cls , _lowerCamelCase )-> List[Any]:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(_lowerCamelCase ).lstrip('''/''' )
def snake_case_( self )-> List[str]:
if self.dir_cache is None:
lowercase__ = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowercase__ = {f['''name''']: f}
def snake_case_( self , _lowerCamelCase )-> List[str]:
return self.file.open().read()
def snake_case_( self , _lowerCamelCase , _lowerCamelCase = "rb" , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , )-> Tuple:
lowercase__ = self._strip_protocol(_lowerCamelCase )
if mode != "rb":
raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class __A ( a ):
"""simple docstring"""
A_ = 'bz2'
A_ = 'bz2'
A_ = '.bz2'
class __A ( a ):
"""simple docstring"""
A_ = 'gzip'
A_ = 'gzip'
A_ = '.gz'
class __A ( a ):
"""simple docstring"""
A_ = 'lz4'
A_ = 'lz4'
A_ = '.lz4'
class __A ( a ):
"""simple docstring"""
A_ = 'xz'
A_ = 'xz'
A_ = '.xz'
class __A ( a ):
"""simple docstring"""
A_ = 'zstd'
A_ = 'zstd'
A_ = '.zst'
def __init__( self , _lowerCamelCase , _lowerCamelCase = "rb" , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = DEFAULT_BLOCK_SIZE , **_lowerCamelCase , )-> Tuple:
super().__init__(
fo=_lowerCamelCase , mode=_lowerCamelCase , target_protocol=_lowerCamelCase , target_options=_lowerCamelCase , block_size=_lowerCamelCase , **_lowerCamelCase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowercase__ = self.file.__enter__
class __A :
"""simple docstring"""
def __init__( self , _lowerCamelCase )-> Union[str, Any]:
lowercase__ = file_
def __enter__( self )-> int:
self._file.__enter__()
return self
def __exit__( self , *_lowerCamelCase , **_lowerCamelCase )-> List[str]:
self._file.__exit__(*_lowerCamelCase , **_lowerCamelCase )
def __iter__( self )-> int:
return iter(self._file )
def snake_case_( self )-> List[Any]:
return next(self._file )
def __getattr__( self , _lowerCamelCase )-> Any:
return getattr(self._file , _lowerCamelCase )
def fixed_enter(*_lowerCamelCase , **_lowerCamelCase ):
return WrappedFile(_enter(*_lowerCamelCase , **_lowerCamelCase ) )
lowercase__ = fixed_enter
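# Usage sketch (editorial addition; assumes these classes are registered with
# fsspec under their `protocol` names, as the datasets library does): a member
# of a compressed file can then be read through URL chaining, e.g.
#
#     with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", mode="rb") as f:
#         raw = f.read()
#
# Each subclass only sets the protocol, the fsspec compression codec name, and
# the extension stripped to recover the uncompressed member name.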
| 318 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a :str = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = ["""pixel_values"""]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BICUBIC , _a = True , _a = None , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , _a = True , **_a , ) -> None:
"""simple docstring"""
super().__init__(**_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = size if size is not None else {"""shortest_edge""": 224}
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(_a , default_to_square=_a )
SCREAMING_SNAKE_CASE__ : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
SCREAMING_SNAKE_CASE__ : int = get_size_dict(_a , default_to_square=_a , param_name="""crop_size""" )
SCREAMING_SNAKE_CASE__ : List[Any] = do_resize
SCREAMING_SNAKE_CASE__ : str = size
SCREAMING_SNAKE_CASE__ : Optional[int] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : str = crop_size
SCREAMING_SNAKE_CASE__ : Optional[int] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE__ : Any = do_convert_rgb
def _a ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def _a ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a )
def _a ( self , _a , _a , _a = None , **_a , ) -> List[Any]:
"""simple docstring"""
return rescale(_a , scale=_a , data_format=_a , **_a )
def _a ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
"""simple docstring"""
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def _a ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Any = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_size_dict(_a , param_name="""size""" , default_to_square=_a )
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_size_dict(_a , param_name="""crop_size""" , default_to_square=_a )
SCREAMING_SNAKE_CASE__ : Dict = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : List[str] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE__ : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Optional[int] = [to_numpy_array(_a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : int = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : int = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = [to_channel_dimension_format(_a , _a ) for image in images]
SCREAMING_SNAKE_CASE__ : Any = {"""pixel_values""": images}
return BatchFeature(data=_a , tensor_type=_a )
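# Processing sketch (editorial addition): with the defaults above, an input
# image is converted to RGB, resized so its shortest edge is 224, center-cropped
# to 224x224, rescaled by 1/255, and normalized with the OpenAI CLIP mean/std,
# e.g. (hypothetical instantiation):
#
#     processor = CLIPImageProcessor()
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     # pixel_values.shape == (1, 3, 224, 224)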
| 680 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Iteratively solve Ax = b for a strictly diagonally dominant matrix A."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise ValueError unless each diagonal entry exceeds the sum of the rest of its row."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
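# Worked example (editorial sketch): the strictly diagonally dominant system
#     4x + y = 2
#     x + 3y = -6
# has exact solution x = 12/11, y = -26/11, and the iteration converges to it:
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[2.0], [-6.0]])
    approx = jacobi_iteration_method(coefficient, constant, [0.0, 0.0], 25)
    assert abs(approx[0] - 12 / 11) < 1e-6 and abs(approx[1] + 26 / 11) < 1e-6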
| 680 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@slow
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
SCREAMING_SNAKE_CASE = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
model.to(_UpperCamelCase )
from datasets import load_dataset
SCREAMING_SNAKE_CASE = load_dataset("nielsr/rvlcdip-demo" )
SCREAMING_SNAKE_CASE = dataset["train"][0]["image"].convert("RGB" )
SCREAMING_SNAKE_CASE = image_processor(_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.Size((1, 16) )
self.assertEqual(logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor(
[-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=_UpperCamelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
| 713 | import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
def __init__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict=13 , _UpperCamelCase : List[Any]=64 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : int=3 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Tuple=32 , _UpperCamelCase : str=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Any=37 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : Optional[int]=10 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : Union[str, Any]=[1, 16, 4, 4] , _UpperCamelCase : Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
SCREAMING_SNAKE_CASE = (self.image_size // 32) ** 2
SCREAMING_SNAKE_CASE = num_patches + 1
def __snake_case( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCamelCase , )
    def create_and_check_model( self , config , pixel_values , labels ) -> int:
        '''simple docstring'''
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> Optional[Any]:
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) -> Tuple:
        '''simple docstring'''
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> Any:
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
    def test_inputs_embeds( self ) -> Optional[Any]:
        '''simple docstring'''
        pass
    def test_model_common_attributes( self ) -> Optional[int]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ) -> Optional[int]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_initialization( self ) -> List[str]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [F"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
    def test_model_from_pretrained( self ) -> List[Any]:
        '''simple docstring'''
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ) -> Any:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ) -> str:
        '''simple docstring'''
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
    def test_accelerate_inference( self ) -> Tuple:
        '''simple docstring'''
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , "tabby, tabby cat" )
| 647 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = (
    subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
)
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
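# e.g. with sys.argv[1:] == ["utils", "src"], the pattern becomes r"^(utils|src).*?\.py$",
# so "src/transformers/foo.py" is kept while "docs/index.md" and "setup.py" are filtered out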
print(''' '''.join(relevant_modified_files), end='''''')
| 568 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare( self , dl_manager ) -> Any:
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute( self , sources , predictions , references , gpus=None , progress_bar=False ) -> Dict:
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
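    # Usage sketch (mirrors the docstring above): each row handed to the scorer is a
    # dict like {"src": ..., "mt": ..., "ref": ...}, and `compute` returns
    # {"mean_score": <float>, "scores": [<float>, ...]} for the whole batch.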
| 568 | 1 |
import math
def lowercase__ ( initial_intensity: float , angle: float ):
    '''simple docstring'''
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''' )
    # handling of values out of the allowed range
    if angle < 0 or angle > 3_6_0:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
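# Worked example (illustrative): an initial intensity of 100.0 at a 60 degree angle gives
# 100.0 * cos(60°) ** 2 == 100.0 * 0.25 == 25.0.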
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 501 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('''DataClass''', Any)
DataClassType = NewType('''DataClassType''', Any)
def string_to_bool ( v ):
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def make_choice_type_function ( choices: list ):
    '''simple docstring'''
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg ( *,
    aliases: Union[str, List[str]] = None , help: str = None , default: Any = dataclasses.MISSING , default_factory: Callable[[], Any] = dataclasses.MISSING , metadata: dict = None , **kwargs ,):
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
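# Hypothetical usage sketch (names below are illustrative, not part of this module):
#
#     @dataclasses.dataclass
#     class TrainArgs:
#         learning_rate: float = HfArg(default=3e-5, aliases=["--lr"], help="Learning rate.")
#
#     parser = HfArgumentParser(TrainArgs)
#     (train_args,) = parser.parse_args_into_dataclasses(["--lr", "1e-4"])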
class HfArgumentParser( ArgumentParser ):
    '''simple docstring'''
    dataclass_types: Iterable[DataClassType]
    def __init__( self , dataclass_types: Union[DataClassType, Iterable[DataClassType]] , **kwargs ) -> None:
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs['''formatter_class'''] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field( parser: ArgumentParser , field: dataclasses.Field ):
        field_name = F'''--{field.name}'''
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''' )
        aliases = kwargs.pop('''aliases''' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '''__origin__''' , field.type )
        if origin_type is Union or (hasattr(types , '''UnionType''' ) and isinstance(origin_type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    F''' Problem encountered in field \'{field.name}\'.''' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '''__origin__''' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '''__origin__''' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['''choices'''] = field.type.__args__
            else:
                kwargs['''choices'''] = [x.value for x in field.type]
            kwargs['''type'''] = make_choice_type_function(kwargs['''choices'''] )
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            else:
                kwargs['''required'''] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['''type'''] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['''default'''] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['''nargs'''] = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['''const'''] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['''type'''] = field.type.__args__[0]
            kwargs['''nargs'''] = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['''required'''] = True
        else:
            kwargs['''type'''] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            else:
                kwargs['''required'''] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['''default'''] = False
            parser.add_argument(F'''--no_{field.name}''' , action='''store_false''' , dest=field.name , **bool_kwargs )
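        # e.g. a field `use_cache: bool = True` produces `--use_cache` (nargs="?", const=True)
        # plus a `--no_use_cache` complement that stores False into the same destination,
        # so both spellings round-trip through the parser.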
def UpperCAmelCase__ ( self : Any , lowerCamelCase_ : DataClassType ) -> Optional[int]:
if hasattr(lowerCamelCase_ , '''_argument_group_name''' ):
__magic_name__ : Tuple = self.add_argument_group(dtype._argument_group_name )
else:
__magic_name__ : Any = self
try:
__magic_name__ : Dict[str, type] = get_type_hints(lowerCamelCase_ )
except NameError:
raise RuntimeError(
F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowerCamelCase_ ):
__magic_name__ : Any = '''.'''.join(map(lowerCamelCase_ , sys.version_info[:3] ) )
raise RuntimeError(
F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(lowerCamelCase_ ):
if not field.init:
continue
__magic_name__ : Tuple = type_hints[field.name]
self._parse_dataclass_field(lowerCamelCase_ , lowerCamelCase_ )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='''append''' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('''-''' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
            return (*outputs,)
    def parse_dict( self , args: Dict[str, Any] , allow_extra_keys: bool = False ) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}''' )
        return tuple(outputs )
    def parse_json_file( self , json_file: str , allow_extra_keys: bool = False ) -> Tuple[DataClass, ...]:
        with open(Path(json_file ) , encoding='''utf-8''' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file: str , allow_extra_keys: bool = False ) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
| 501 | 1 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
UpperCAmelCase_ : List[Any] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
UpperCAmelCase_ : Any = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
UpperCAmelCase_ : Tuple = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union ( pred_label , label , num_labels: int , ignore_index: bool , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
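# Worked example (illustrative): if pred_label == label == [[0, 1]] with num_labels=2 and
# no ignored or reduced labels, every pixel intersects, so area_intersect == area_union
# == [1, 1] and the per-category IoU computed downstream is 1.0 for both classes.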
def total_intersect_and_union ( results , gt_seg_maps , num_labels: int , ignore_index: bool , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou ( results , gt_seg_maps , num_labels: int , ignore_index: bool , nan_to_num: Optional[int] = None , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
    """simple docstring"""
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou )
    metrics["mean_accuracy"] = np.nanmean(acc )
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
} ),reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
],)
    def _compute( self , predictions , references , num_labels: int , ignore_index: bool , nan_to_num: Optional[int] = None , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
| 44 |
def __UpperCamelCase (numbers: list[int] ) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple) ) or not all(
        isinstance(number, int ) for number in numbers ):
        raise ValueError('numbers must be an iterable of integers' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now , min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number )
        min_till_now = min(number, min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now )
    return max_prod
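# Worked example (illustrative): for [2, 3, -2, 4] the best subarray is [2, 3] with
# product 6; for [-2, 0, -1] the answer is 0, since any subarray containing both
# negatives must also include the zero between them.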
| 699 | 0 |
from __future__ import annotations
class Matrix :
'''simple docstring'''
    def __init__( self , rows) -> None:
        error = TypeError(
            '''Matrices must be formed from a list of zero or more lists containing at '''
            '''least one and the same number of values, each of which must be of type '''
            '''int or float.''')
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
def UpperCamelCase__ ( self) -> List[Any]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def UpperCamelCase__ ( self) -> Optional[Any]:
return len(self.rows)
@property
def UpperCamelCase__ ( self) -> Optional[int]:
return len(self.rows[0])
@property
def UpperCamelCase__ ( self) -> str:
return (self.num_rows, self.num_columns)
@property
def UpperCamelCase__ ( self) -> List[Any]:
return self.order[0] == self.order[1]
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Any = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(A__)
def UpperCamelCase__ ( self) -> Tuple:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
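    # e.g. Matrix([[1, 2], [3, 4]]).determinant() == (1 * 4) - (2 * 3) == -2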
def UpperCamelCase__ ( self) -> Any:
return bool(self.determinant())
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Optional[Any]:
__UpperCamelCase :str = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(A__).determinant()
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> List[str]:
if (row + column) % 2 == 0:
return self.get_minor(A__ , A__)
return -1 * self.get_minor(A__ , A__)
def UpperCamelCase__ ( self) -> Dict:
return Matrix(
[
[self.get_minor(A__ , A__) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def UpperCamelCase__ ( self) -> Union[str, Any]:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Tuple = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(A__)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Optional[int] = self.determinant()
if not determinant:
raise TypeError('''Only matrices with a non-zero determinant have an inverse''')
return self.adjugate() * (1 / determinant)
def __repr__( self) -> Tuple:
return str(self.rows)
def __str__( self) -> Optional[int]:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
'''[''' + '''. '''.join([str(A__) for value in row]) + '''.]'''
for row in self.rows
])
+ "]"
)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple:
__UpperCamelCase :List[Any] = TypeError('''Row must be a list containing all ints and/or floats''')
if not isinstance(A__ , A__):
raise type_error
for value in row:
if not isinstance(A__ , (int, float)):
raise type_error
if len(A__) != self.num_columns:
raise ValueError(
'''Row must be equal in length to the other rows in the matrix''')
if position is None:
self.rows.append(A__)
else:
__UpperCamelCase :Optional[int] = self.rows[0:position] + [row] + self.rows[position:]
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Dict:
__UpperCamelCase :Any = TypeError(
'''Column must be a list containing all ints and/or floats''')
if not isinstance(A__ , A__):
raise type_error
for value in column:
if not isinstance(A__ , (int, float)):
raise type_error
if len(A__) != self.num_rows:
raise ValueError(
'''Column must be equal in length to the other columns in the matrix''')
if position is None:
__UpperCamelCase :int = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
__UpperCamelCase :Dict = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self , __lowercase) -> Optional[int]:
if not isinstance(A__ , A__):
return NotImplemented
return self.rows == other.rows
def __ne__( self , __lowercase) -> List[Any]:
return not self == other
def __neg__( self) -> Dict:
return self * -1
def __add__( self , __lowercase) -> str:
if self.order != other.order:
raise ValueError('''Addition requires matrices of the same order''')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self , __lowercase) -> Tuple:
if self.order != other.order:
raise ValueError('''Subtraction requires matrices of the same order''')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self , __lowercase) -> str:
if isinstance(A__ , (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(A__ , A__):
if self.num_columns != other.num_rows:
raise ValueError(
'''The number of columns in the first matrix must '''
'''be equal to the number of rows in the second''')
return Matrix(
[
[Matrix.dot_product(A__ , A__) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
'''A Matrix can only be multiplied by an int, float, or another matrix''')
def __pow__( self , __lowercase) -> List[str]:
if not isinstance(A__ , A__):
raise TypeError('''A Matrix can only be raised to the power of an int''')
if not self.is_square:
raise ValueError('''Only square matrices can be raised to a power''')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'''Only invertable matrices can be raised to a negative power''')
__UpperCamelCase :Union[str, Any] = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def UpperCamelCase__ ( cls , __lowercase , __lowercase) -> Optional[int]:
return sum(row[i] * column[i] for i in range(len(A__)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['''data'''])
y = np.array(data['''target'''])
classes = data['''target_names''']
X_train , X_test , y_train , y_test = train_test_split(X, y)
def euclidean_distance ( a , b ):
    '''simple docstring'''
    return np.linalg.norm(np.array(a ) - np.array(b ) )
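# e.g. euclidean_distance((0, 0), (3, 4)) == 5.0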
def classifier ( train_data , train_target , classes , point , k=5 ):
    '''simple docstring'''
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 452 | 0 |
from typing import Any
def viterbi ( observations_space: list , states_space: list , initial_probabilities: dict , transition_probabilities: dict , emission_probabilities: dict , ) -> list:
    '''simple docstring'''
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation ( observations_space: Any , states_space: Any , initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    '''simple docstring'''
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty ( observations_space: Any , states_space: Any , initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    '''simple docstring'''
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('There\'s an empty parameter' )
def _validate_lists ( observations_space: Any , states_space: Any ) -> None:
    '''simple docstring'''
    _validate_list(observations_space , 'observations_space' )
    _validate_list(states_space , 'states_space' )
def _validate_list ( _object: Any , var_name: str ) -> None:
    '''simple docstring'''
    if not isinstance(_object , list ):
        msg = f"{var_name} must be a list"
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg )
def _validate_dicts ( initial_probabilities: Any , transition_probabilities: Any , emission_probabilities: Any , ) -> None:
    '''simple docstring'''
    _validate_dict(initial_probabilities , 'initial_probabilities' , float )
    _validate_nested_dict(transition_probabilities , 'transition_probabilities' )
    _validate_nested_dict(emission_probabilities , 'emission_probabilities' )
def _validate_nested_dict ( _object: Any , var_name: str ) -> None:
    '''simple docstring'''
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict ( _object: Any , var_name: str , value_type: type , nested: bool = False ) -> None:
    '''simple docstring'''
    if not isinstance(_object , dict ):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = 'nested dictionary ' if nested else ''
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg )
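# Usage sketch (the classic health/observation HMM; all values are illustrative):
#   observations = ["normal", "cold", "dizzy"]
#   states = ["healthy", "sick"]
#   start_p = {"healthy": 0.6, "sick": 0.4}
#   trans_p = {"healthy": {"healthy": 0.7, "sick": 0.3},
#              "sick": {"healthy": 0.4, "sick": 0.6}}
#   emit_p = {"healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#             "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#   viterbi(observations, states, start_p, trans_p, emit_p)
#   # -> ["healthy", "healthy", "sick"]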
if __name__ == "__main__":
from doctest import testmod
testmod()
| 313 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError( RuntimeError ):
    pass
def gen ( shards: List[str] ):
    '''simple docstring'''
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main () -> None:
    '''simple docstring'''
    rank = int(os.environ['RANK'] )
    world_size = int(os.environ['WORLD_SIZE'] )
    parser = ArgumentParser()
    parser.add_argument('--streaming' , type=bool )
    parser.add_argument('--local_rank' , type=int )
    parser.add_argument('--num_workers' , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'shards': [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}" )
if __name__ == "__main__":
main()
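# Hypothetical launch sketch (not part of this script): it expects a torch distributed
# launcher to set RANK and WORLD_SIZE in each worker's environment, e.g. something like
#   torchrun --nproc_per_node=2 this_script.py --streaming True --num_workers 2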
| 313 | 1 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest( BertTokenizationTest ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders( self ):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_2 = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 706 |
'''simple docstring'''
def dodecahedron_surface_area ( edge: float ) -> float:
    '''simple docstring'''
    if edge <= 0 or not isinstance(edge , (int, float) ):
        raise ValueError("Length must be a positive." )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume ( edge: float ) -> float:
    '''simple docstring'''
    if edge <= 0 or not isinstance(edge , (int, float) ):
        raise ValueError("Length must be a positive." )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
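# Worked values (illustrative): for a unit edge, the surface area is
# 3 * sqrt(25 + 10 * sqrt(5)) ≈ 20.6457 and the volume is (15 + 7 * sqrt(5)) / 4 ≈ 7.6631.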
if __name__ == "__main__":
import doctest
doctest.testmod()
| 223 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset( Dataset ):
    """simple docstring"""
    def __init__( self , params , data )-> None:
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
    def __getitem__( self , index ):
        return (self.token_ids[index], self.lengths[index])
def __len__( self )-> Optional[Any]:
return len(self.lengths )
    def check( self )-> None:
        assert len(self.token_ids ) == len(self.lengths )
        assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences( self )-> None:
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'''Splitting {sum(indices )} too long sequences.''' )
        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id , sep_id = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
        else:
            cls_id , sep_id = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences( self )-> None:
        init_size = len(self )
        indices = self.lengths > 1_1
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
    def remove_unknown_sequences( self )-> None:
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['''unk_token''']
        init_size = len(self )
        unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
    def print_statistics( self )-> None:
        if not self.params.is_master:
            return
        logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences( self , batch ):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['''pad_token''']
        else:
            pad_idx = self.params.special_tok_ids['''unk_token''']
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
        return tk_t, lg_t
| 161 |
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies ( url: str = "" ):
    url = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
    soup = BeautifulSoup(requests.get(url ).text , 'html.parser' )
    titles = soup.find_all('td' , attrs='titleColumn' )
    ratings = soup.find_all('td' , class_='ratingColumn imdbRating' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies ( filename: str = "IMDb_Top_250_Movies.csv" ):
    movies = get_imdb_top_250_movies()
    with open(filename , 'w' , newline='' ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(['Movie title', 'IMDb rating'] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 539 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
    def test_get_aligned_output_features_output_indices( self ) -> None:
        """simple docstring"""
        stage_names = ["a", "b", "c"]
        # Defaults to last layer if both are None
        out_features , out_indices = get_aligned_output_features_output_indices(None , None , stage_names )
        self.assertEqual(out_features , ["c"] )
        self.assertEqual(out_indices , [2] )
        # Out indices set to match out features
        out_features , out_indices = get_aligned_output_features_output_indices(["a", "c"] , None , stage_names )
        self.assertEqual(out_features , ["a", "c"] )
        self.assertEqual(out_indices , [0, 2] )
        # Out features set to match out indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [0, 2] , stage_names )
        self.assertEqual(out_features , ["a", "c"] )
        self.assertEqual(out_indices , [0, 2] )
        # Out features selected from negative indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [-3, -1] , stage_names )
        self.assertEqual(out_features , ["a", "c"] )
        self.assertEqual(out_indices , [-3, -1] )
    def test_verify_out_features_out_indices( self ) -> None:
        """simple docstring"""
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , None )
        # Out features must be a list
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , 0 , ["a", "b"] )
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , (0, 1) , ["a"] )
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
        # Out features should match out indices
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
        # Out features and out indices should be in order
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
    def test_backbone_mixin(self) -> None:
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1]) | 709 |
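# Hedged sketch (my illustration, not transformers source) of the alignment
# rule the backbone-utils tests in the previous row exercise: either side is
# derived from the other, defaulting to the last stage when both are None.
def _aligned_sketch(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        return [stage_names[i] for i in out_indices], list(out_indices)
    return list(out_features), [stage_names.index(name) for name in out_features]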
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path) | 298 | 0 |
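# Hedged invocation example for the conversion script in the previous row
# (the script file name and output path are placeholders, not from the source):
#     python convert_unclip_txt2img_to_image_variation.py \
#         --txt2img_unclip kakaobrain/karlo-v1-alpha \
#         --dump_path ./karlo-image-variation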
def move_tower(height, from_pole, to_pole, with_pole) -> None:
    """Recursively move `height` disks from `from_pole` to `to_pole` via `with_pole`."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
def move_disk(from_pole, to_pole) -> None:
    print('''moving disk from''', from_pole, '''to''', to_pole)
def main() -> None:
    height = int(input('''Height of hanoi: ''').strip())
    move_tower(height, '''A''', '''B''', '''C''')
if __name__ == "__main__":
main()
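# Hedged worked trace (my addition): move_tower(2, "A", "B", "C") prints
#     moving disk from A to C
#     moving disk from A to B
#     moving disk from C to B
# and in general a tower of height n takes 2**n - 1 disk moves.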
| 87 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = '''mobilenet_v1'''
    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''')
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('''pixel_values''', {0: '''batch'''})])
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})])
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
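# Hedged usage sketch (my addition) for the two classes above:
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     onnx_config = MobileNetV1OnnxConfig(config)
#     onnx_config.inputs  # OrderedDict([('pixel_values', {0: 'batch'})])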
| 87 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_xlm_roberta''': [
        '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XLMRobertaConfig''',
        '''XLMRobertaOnnxConfig''',
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta'''] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta_fast'''] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm_roberta'''] = [
        '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMRobertaForCausalLM''',
        '''XLMRobertaForMaskedLM''',
        '''XLMRobertaForMultipleChoice''',
        '''XLMRobertaForQuestionAnswering''',
        '''XLMRobertaForSequenceClassification''',
        '''XLMRobertaForTokenClassification''',
        '''XLMRobertaModel''',
        '''XLMRobertaPreTrainedModel''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlm_roberta'''] = [
        '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLMRobertaForCausalLM''',
        '''TFXLMRobertaForMaskedLM''',
        '''TFXLMRobertaForMultipleChoice''',
        '''TFXLMRobertaForQuestionAnswering''',
        '''TFXLMRobertaForSequenceClassification''',
        '''TFXLMRobertaForTokenClassification''',
        '''TFXLMRobertaModel''',
        '''TFXLMRobertaPreTrainedModel''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xlm_roberta'''] = [
        '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FlaxXLMRobertaForMaskedLM''',
        '''FlaxXLMRobertaForCausalLM''',
        '''FlaxXLMRobertaForMultipleChoice''',
        '''FlaxXLMRobertaForQuestionAnswering''',
        '''FlaxXLMRobertaForSequenceClassification''',
        '''FlaxXLMRobertaForTokenClassification''',
        '''FlaxXLMRobertaModel''',
        '''FlaxXLMRobertaPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
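# Hedged sketch (my simplification, not the real implementation) of what
# `_LazyModule` does: it records `_import_structure` and only imports a
# submodule the first time one of its attributes is accessed, roughly:
#     class LazyModule(types.ModuleType):
#         def __getattr__(self, name):
#             submodule = self._name_to_module[name]  # e.g. "modeling_xlm_roberta"
#             module = importlib.import_module("." + submodule, self.__name__)
#             value = getattr(module, name)
#             setattr(self, name, value)  # cache for the next access
#             return value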
| 144 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({'(', '[', '{'})
    closed_brackets = set({')', ']', '}'})
    open_to_closed = {'{': '}', '[': ']', '(': ')'}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
def main() -> None:
    s = input('Enter sequence of brackets: ')
    if is_balanced(s):
        print(s, 'is balanced')
    else:
        print(s, 'is not balanced')
if __name__ == "__main__":
main()
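# Hedged self-checks for `is_balanced` (my addition): "([)]" fails because
# ']' is compared against the most recently pushed unmatched opener '('.
assert is_balanced("([])") and is_balanced("{[()]}")
assert not is_balanced("([)]") and not is_balanced("((")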
| 144 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Kadane's recurrence: either extend the running subarray or restart it.
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(F"""{max_subarray_sum(nums) = }""") | 237 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
    """tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_ctrl"""] = [
        """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CTRLForSequenceClassification""",
        """CTRLLMHeadModel""",
        """CTRLModel""",
        """CTRLPreTrainedModel""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_ctrl"""] = [
        """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFCTRLForSequenceClassification""",
        """TFCTRLLMHeadModel""",
        """TFCTRLModel""",
        """TFCTRLPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 237 | 1 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''')

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = 'utf-8'
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop('path_or_buf', None)
        orient = self.to_json_kwargs.pop('orient', 'records')
        lines = self.to_json_kwargs.pop('lines', True if orient == 'records' else False)
        index = self.to_json_kwargs.pop('index', False if orient in ['split', 'table'] else True)
        compression = self.to_json_kwargs.pop('compression', None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''')

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, 'wb', compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    ' was passed. Please provide a local path instead.' )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs )
        return written
    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs )
        if not json_str.endswith('\n'):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the dataset as JSON to a binary file object; returns the number of bytes written."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format', ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format', ):
                    written += file_obj.write(json_str)
        return written
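# Hedged usage sketch (my addition; the file name is a placeholder):
#     from datasets import Dataset
#     ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#     JsonDatasetWriter(ds, "out.jsonl").write()       # one JSON object per line
#     reloaded = JsonDatasetReader("out.jsonl").read()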
| 263 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
BPE_TOKEN_MERGES = '''</w>'''
BPE_TOKEN_VOCAB = '''@@ '''
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/s2t-wav2vec2-large-en-de''': 10_24}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs, )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''')
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding='utf-8') as merges_handle:
                merges = merges_handle.read().split('\n')[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)
    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Pick the lowest-ranked (earliest learned) merge among the current pairs.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = ' '.join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '\n' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, '')
        word = word.replace(' ', BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.' )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = ' '.join(tokens)
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merges_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, 'w', encoding='utf-8') as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return (vocab_file, merges_file)
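# Hedged walk-through of the BPE loop above (my illustration): for the token
# "low" the initial symbol tuple is ("l", "o", "w</w>") and get_pairs yields
# {("l", "o"), ("o", "w</w>")}. If ("l", "o") is the highest-ranked merge in
# merges.txt, the tuple becomes ("lo", "w</w>") and the loop repeats until no
# known merge remains; the "</w>" end-of-word marker is then stripped and
# "@@ " continuation markers are inserted before the string is returned.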
| 263 | 1 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)
    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the input already packs image and question together
            # (a dict, a list of dicts, a generator or a dataset).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["""image"""])
        model_inputs = self.tokenizer(
            inputs["""question"""], return_tensors=self.framework, padding=padding, truncation=truncation )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F'Unsupported framework: {self.framework}')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 468 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f''': {x}: ''' in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 449 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
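# Hedged usage sketch (the checkpoint is one public example, not fixed by
# this file; `waveform` is a placeholder array of raw audio samples):
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#     labels = processor(text="a transcription").input_ids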
| 645 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ) -> None:
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype')

        self.post_init()
    def post_init(self):
        """Safety checker that the arguments are correct."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('llm_int8_threshold must be a float')
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('llm_int8_skip_modules must be a list of strings')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean')
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean')
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype')
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('bnb_4bit_quant_type must be a string')
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean')
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse(
            '0.39.0' ):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
    def is_quantizable(self):
        """Returns True if the config enables 8-bit or 4-bit quantization."""
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method(self):
        """Returns the quantization method selected by this config, or None."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self, json_file_path):
        with open(json_file_path, 'w', encoding='utf-8') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\n'
            writer.write(json_string)
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype']).split('.')[1]
        return output
    def __repr__(self) -> str:
        return f"""{self.__class__.__name__} {self.to_json_string()}"""
    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
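# Hedged usage sketch (mirrors the public transformers API this config feeds;
# the checkpoint is a placeholder example):
#     from transformers import AutoModelForCausalLM
#     quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4",
#                                       bnb_4bit_compute_dtype="bfloat16")
#     model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m",
#                                                  quantization_config=quant_config)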
| 645 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__( self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError('''You have to specify at least images or text.''')
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding['''qformer_input_ids'''] = qformer_text_encoding.pop('''input_ids''')
            encoding['''qformer_attention_mask'''] = qformer_text_encoding.pop('''attention_mask''')
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, '''qformer_tokenizer''')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='''qformer_tokenizer''')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 72 |
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10_001) -> int:
    """Returns the `nth` prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    # Handle 2 separately, then test only odd candidates.
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
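# Hedged sanity checks (my addition): the first six primes are 2, 3, 5, 7,
# 11, 13, so solution(6) == 13; the 10001st prime (the well-known Project
# Euler #7 answer) is 104743, so solution() == 104743.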
| 402 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )
    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None ) -> jnp.ndarray:
        return sample
    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = () ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-2_0)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-2_0))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jax.random.KeyArray] = None, return_dict: bool = True, ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                ' for the FlaxDDPMScheduler.' )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(
        self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)
    def get_velocity(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)
    def __len__(self) -> int:
        return self.config.num_train_timesteps
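# Hedged derivation note (my addition): steps 4-5 of `step` implement the
# DDPM posterior mean of Ho et al. (2020), Eq. (7):
#     mu_t(x_t, x_0) = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
#                    + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t
# which is exactly pred_original_sample_coeff * x_0 + current_sample_coeff * x_t,
# since beta_prod_t = 1 - alpha_bar_t and beta_prod_t_prev = 1 - alpha_bar_{t-1}.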
| 706 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello, World!"""
SAMPLE_LANGUAGE = """en_XX"""
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool ) -> None:
    data_dir = Path('data_bin')
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent) , checkpoint_file=Path(xmod_checkpoint_path).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(data_dir) , bpe='sentencepiece' , sentencepiece_model=str(Path(xmod_checkpoint_path).parent / 'sentencepiece.bpe.model') , src_dict=str(data_dir / 'dict.txt') , )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our X-MOD config:' , config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError('Dimensions of self-attention weights do not match.')

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('Dimensions of self-attention output weights do not match.')
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('Dimensions of intermediate weights do not match.')
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('Dimensions of feed-forward weights do not match.')
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError('Lists of language adapters do not match.')
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads['mnli'](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1E-3)
    print('Do both models output the same tensors?', '🔥' if success else '💩')
    if not success:
        raise Exception('Something went wRoNg')

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(F"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 688 | 0 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_labels=3,
        scope=None,
        range_bbox=10_00,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 76 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
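# Note on the replicate/shard pattern used in both tests above: `replicate`
# copies a pytree of parameters onto every local device, while `shard` splits a
# batch along its leading axis so each device receives one slice for pmap-style
# parallel inference. A minimal self-contained sketch (the array shapes are
# illustrative assumptions, not values from the tests):
#
#     import jax
#     import jax.numpy as jnp
#     from flax.jax_utils import replicate
#     from flax.training.common_utils import shard
#
#     params = {"w": jnp.ones((3,))}
#     batch = jnp.zeros((jax.device_count() * 2, 8))  # leading dim divisible by device count
#     p_params = replicate(params)  # adds a leading device axis to every leaf
#     sharded = shard(batch)        # shape (num_devices, 2, 8)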
| 336 | 0 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from the model to reduce its size.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
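# Minimal usage sketch for the helpers above. The file name "model.onnx" is a
# hypothetical example; any ONNX file with duplicated initializers would work.
if __name__ == "__main__":
    optimized_path = remove_dup_initializers("model.onnx")
    print("Optimized model written to:", optimized_path)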
| 557 |
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
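# Worked example (illustrative): on this 4x4 grid, the only non-zero product of
# four adjacent numbers is the main diagonal, 1 * 2 * 3 * 4 = 24:
#
#     >>> largest_product([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]])
#     24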
| 557 | 1 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}

        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 45 |
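# Brief usage sketch for the VisionEncoderDecoderConfig defined above (the
# checkpoint names are illustrative assumptions):
#
#     from transformers import AutoConfig, VisionEncoderDecoderConfig
#
#     encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
#     decoder_config = AutoConfig.from_pretrained("bert-base-uncased")
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention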
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCAmelCase : Tuple = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `\"train\"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `\"compressed\"`)
The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
`\"compressed\"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"

        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved

        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
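# Brief usage sketch for RagConfig (the checkpoint names are illustrative):
#
#     from transformers import AutoConfig, RagConfig
#
#     question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         question_encoder, generator, n_docs=5, index_name="exact"
#     )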
| 444 | 0 |
"""simple docstring"""
import os
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ = "input.txt" ):
with open(os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ ) ) as input_file:
SCREAMING_SNAKE_CASE = [
[int(SCREAMING_SNAKE_CASE_ ) for element in line.split(',' )]
for line in input_file.readlines()
]
SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = len(matrix[0] )
SCREAMING_SNAKE_CASE = [[-1 for _ in range(SCREAMING_SNAKE_CASE_ )] for _ in range(SCREAMING_SNAKE_CASE_ )]
for i in range(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = matrix[i][0]
for j in range(1, SCREAMING_SNAKE_CASE_ ):
for i in range(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1, SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2, -1, -1 ):
SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'{solution() = }')
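# Worked example (illustrative trace of the three relaxation passes above): for
#
#     matrix = [[1, 2, 3],
#               [4, 5, 6]]
#
# moving right, up and down, the cheapest left-to-right path is 1 -> 2 -> 3,
# so the minimal path sum would be 6.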
| 704 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 406 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Optional[int] = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
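# Behavioural note (illustrative sketch; the import path follows the standard
# transformers layout): with the lazy module above, importing the package is
# cheap, and the sentencepiece-backed tokenizer is only loaded on first access:
#
#     from transformers.models.mluke import MLukeTokenizer  # real import happens here
#     tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")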
| 628 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self) -> None:
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
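# Example usage of the helpers above (the transition probabilities are
# illustrative; with these weights state "a" carries most of the stationary
# mass, roughly 5/6 versus 1/6 for "b"):
#
#     >>> transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#     >>> result = get_transitions("a", transitions, 5000)
#     >>> result["a"] > result["b"]
#     True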
| 383 | 0 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, dilation, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 370 |
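# Brief usage sketch for the DetrConfig defined above (the values shown are its
# defaults):
#
#     config = DetrConfig()
#     assert config.num_queries == 100 and config.d_model == 256
#     assert config.hidden_size == 256  # resolved through attribute_map -> d_model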
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim:, :
            ]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        """--model_name""",
        default="""maskformer-swin-tiny-ade""",
        type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
    )
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 370 | 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
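# Usage sketch (illustrative, not part of the original module): the config is
# normally consumed through transformers, e.g.
#   cfg = ConditionalDetrConfig()             # defaults to a resnet50 timm backbone
#   cfg.hidden_size == cfg.d_model == 256     # aliased via attribute_map above
#   cfg.to_dict()["model_type"] == "conditional_detr"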
| 129 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
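# Example of the resize rule above (illustrative): a 30x60 PIL image (w=30,
# h=60) keeps its aspect ratio while the shortest edge is scaled to 18, so
# get_expected_values returns (expected_height, expected_width) == (36, 18).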
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 281 | 0 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
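# Round-trip sanity check (illustrative, from the Project Euler 89 statement):
# "MCCCCCCVI" parses to 1606 and re-encodes minimally as "MDCVI", saving 4 chars.
assert parse_roman_numerals("MCCCCCCVI") == 1606
assert generate_roman_numerals(1606) == "MDCVI"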
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
if __name__ == "__main__":
print(F"{solution() = }") | 708 |
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
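# Worked example (illustrative): for the input string "1,-2,4,-1,2",
# sum_value tracks the best subarray sum ending at each index and rear keeps
# the running maximum, so solve_sub_array() returns 5 (the slice [4, -1, 2]).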
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
print(('the results is:', re))
| 671 | 0 |
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=1_0, r=5))
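# Sanity checks (illustrative): C(10, 5) = 252, and Pascal's rule
# C(n, r) = C(n - 1, r - 1) + C(n - 1, r) holds for the implementation above.
assert binomial_coefficient(10, 5) == 252
assert binomial_coefficient(10, 5) == binomial_coefficient(9, 4) + binomial_coefficient(9, 5)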
| 638 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("Unreachable: the queue can only drain via the returns above.")
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
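# Minimal non-interactive sketch (illustrative; the demo below uses the
# interactive build_tree() instead): build a three-node tree by hand and run
# one traversal.
def _demo_small_tree() -> None:
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    pre_order(root)  # prints: 1,2,3,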
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
lowercase__ = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 638 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
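# Illustrative behavior (my example, not from the original): with a tokenizer
# that yields roughly one id per whitespace-separated word and max_tokens=4,
# sources ["a b", "c", "d e f"] pack into ["a b c", "d e f"] -- neighbours are
# merged greedily until a tokenized candidate would exceed the budget.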
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
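# Example CLI invocation (illustrative paths):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed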
if __name__ == "__main__":
packer_cli()
| 611 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy")
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A black colored car"
        source_prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np")
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy")
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A black colored car"
        source_prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np")
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
| 611 | 1 |
import os
def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution()) | 687 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self: Union[str, Any] ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
def __repr__( self: Optional[Any] ):
return self.__str__()
    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
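    # Illustrative (my example): Polynomial(2, [0, 0, 1]) represents x^2, so
    # .evaluate(3) == 9, .derivative() is 2x, and .integral() is (1/3)x^3.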
    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a) | 687 | 1 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    # Class name reconstructed from the attributes below (EncodecFeatureExtractor + T5 tokenizer).
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
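# Usage sketch (illustrative; assumes a MusicGen checkpoint on the Hub):
#   processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
#   inputs = processor(text=["80s pop track"], padding=True, return_tensors="pt")
#   audios = processor.batch_decode(audio_values, padding_mask=inputs.get("padding_mask"))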
| 717 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
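# Example (illustrative): get_new_h_w(768, 768, scale_factor=8) == (96, 96) --
# the requested 768px image maps onto a 96x96 latent grid, rounding up when
# the size is not divisible by scale_factor**2.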
class KandinskyPipeline(DiffusionPipeline):
    def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(prompt, padding="max_length", truncation=True, max_length=77, return_attention_mask=True, add_special_tokens=True, return_tensors="pt")
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(input_ids=text_input_ids, attention_mask=text_mask)
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(uncond_tokens, padding="max_length", max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt")
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask)
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1)
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, prompt: Union[str, List[str]], image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt)
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler)
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator).prev_sample
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 185 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 25 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 147 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards))
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key):
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
def UpperCAmelCase_ ( self: Optional[Any] , __lowerCamelCase: KT ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__: Optional[int] = self._locate_node(__lowerCamelCase )
if node is not None:
for i, update_node in enumerate(__lowerCamelCase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
UpperCamelCase__: List[Any] = node.forward[i]
else:
UpperCamelCase__: Any = update_node.forward[:i]
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: KT , __lowerCamelCase: VT ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__: Optional[Any] = self._locate_node(__lowerCamelCase )
if node is not None:
UpperCamelCase__: int = value
else:
UpperCamelCase__: List[str] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __lowerCamelCase ):
update_vector.append(self.head )
UpperCamelCase__: Tuple = level
UpperCamelCase__: Any = Node(__lowerCamelCase , __lowerCamelCase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__lowerCamelCase )
else:
UpperCamelCase__: List[Any] = new_node
def UpperCAmelCase_ ( self: List[str] , __lowerCamelCase: VT ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__: Optional[int] = self._locate_node(__lowerCamelCase )
if node is not None:
return node.value
return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat each test 100 times because the skip list is randomized:
        # random values == random bugs.
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
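# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file):
# the skip list behaves like a sorted map with expected O(log n) find/insert/
# delete, paid for with O(n) extra forward pointers on average.
if __name__ == "__main__":
    demo: SkipList[str, int] = SkipList()
    for word, length in (("ant", 3), ("bee", 3), ("cicada", 6)):
        demo.insert(word, length)
    assert list(demo) == ["ant", "bee", "cicada"]  # keys come out in sorted order
    assert demo.find("bee") == 3
    demo.delete("bee")
    assert demo.find("bee") is None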
| 221 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
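# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration, not part of the original tests): the
# behaviour exercised above boils down to "every torch .bin weight must have a
# .safetensors sibling". A simplified stand-in for `is_safetensors_compatible`
# (the real implementation also handles variants and component filtering):
def _toy_is_safetensors_compatible(filenames):
    """Return True if every .bin file has a matching .safetensors file."""

    def to_safetensors(name):
        # diffusers weights keep their stem; transformers weights rename
        # pytorch_model.bin -> model.safetensors
        if name.endswith("diffusion_pytorch_model.bin"):
            return name.replace(".bin", ".safetensors")
        return name.replace("pytorch_model.bin", "model.safetensors")

    available = set(filenames)
    return all(to_safetensors(f) in available for f in filenames if f.endswith(".bin"))


if __name__ == "__main__":
    assert _toy_is_safetensors_compatible(
        ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
    )
    assert not _toy_is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])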
| 221 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    r"""
    Constructs an OWL-ViT processor which wraps an image processor and a CLIP tokenizer into a single processor.
    """
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward all arguments to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward all arguments to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward all arguments to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
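# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module). The checkpoint name is a public OWL-ViT checkpoint; exact tensor
# shapes depend on the checkpoint's image size.
#
#     from PIL import Image
#     import requests
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#     # nested list = per-image text queries; shorter query lists are padded
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                        images=image, return_tensors="pt")
#     # inputs now holds input_ids, attention_mask and pixel_values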
| 577 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 577 | 1 |
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance; the square root is deferred until the end."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force O(n^2) closest-pair distance, used for the small base case."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest pair inside the middle strip; only ~6 neighbours need checking."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts
        )
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 131 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
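# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original script): each LoRA pair folds
# into the base weight as W <- W + alpha * (up @ down). A toy 2x2 example:
#
#     import torch
#
#     w = torch.zeros(2, 2)
#     up = torch.tensor([[1.0], [0.0]])   # (out_features, rank)
#     down = torch.tensor([[0.0, 2.0]])   # (rank, in_features)
#     w += 0.75 * torch.mm(up, down)
#     # w is now [[0.0, 1.5], [0.0, 0.0]] -- the rank-1 update scaled by alpha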
| 131 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    r"""Constructs a CLAP feature extractor that turns raw waveforms into (fused) log-mel spectrograms."""

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a dictionary, dropping the (large) mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Compute the log-mel spectrogram of `waveform` using a hann window."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """Truncate or pad the waveform, then extract the (possibly fused) log-mel features."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
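# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): fused features for a 15 s clip at 48 kHz. The 4-channel first axis
# holds the shrunk global mel plus three random chunks when the clip exceeds
# `max_length_s`. Exact shapes follow from the default hop length of 480.
#
#     import numpy as np
#
#     extractor = ClapFeatureExtractor()
#     waveform = np.random.randn(48_000 * 15).astype(np.float32)
#     features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
#     print(features["input_features"].shape)  # e.g. (1, 4, 1001, 64)
#     print(features["is_longer"])             # [[True]] for a 15 s clip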
| 27 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
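# ---------------------------------------------------------------------------
# Hedged sketch (illustrative, not part of the original CLI): a new subcommand
# follows the same contract as EnvironmentCommand -- a static
# `register_subcommand(subparsers)` plus a `run()` method. `HelloCommand`
# below is made up for illustration, not a real diffusers command.
#
#     class HelloCommand:
#         @staticmethod
#         def register_subcommand(parser):
#             hello = parser.add_parser("hello")
#             hello.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello from diffusers-cli")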
| 65 | 0 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
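# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original test module):
# instantiating the tiny config above and running a forward pass outside the
# test harness. The `init_weights`/`apply` usage is an assumption based on
# the Flax model API.
#
#     import jax
#     from diffusers import FlaxAutoencoderKL
#
#     model = FlaxAutoencoderKL(
#         block_out_channels=[32, 64], in_channels=3, out_channels=3,
#         down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
#         up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
#         latent_channels=4,
#     )
#     sample = jax.random.uniform(jax.random.PRNGKey(0), (4, 3, 32, 32))
#     params = model.init_weights(jax.random.PRNGKey(0))
#     out = model.apply({"params": params}, sample)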
| 507 |
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_A = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_A = direct_transformers_import(PATH_TO_TRANSFORMERS)
_A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_A = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_A = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowercase_ ( __UpperCAmelCase ) -> str:
lowerCAmelCase__ : Union[str, Any] = None
# source code of `config_class`
lowerCAmelCase__ : List[Any] = inspect.getsource(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = _re_checkpoint.findall(__UpperCAmelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("""/""" ):
lowerCAmelCase__ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase__ : Dict = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase__ : Optional[Any] = ckpt_name
break
return checkpoint
def lowercase_ ( ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCAmelCase__ : Dict = get_checkpoint_from_config_class(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
lowerCAmelCase__ : int = """\n""".join(sorted(__UpperCAmelCase ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
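# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original script): on a docstring line
# such as
#     "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
# `_re_checkpoint.findall(...)` yields
#     [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]
# which is exactly the (name, link) pair that the check above validates.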
| 507 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Scan `filepath` for `open(...)` calls that neither use a binary/write
        mode nor pass an explicit `encoding=` argument."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Scan `filepath` for bare `print(...)` calls, ignoring ones inside
        comments or string literals."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 23 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
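# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module):
# building the ONNX config and inspecting the dynamic axes it declares.
# `facebook/blenderbot_small-90M` is the checkpoint referenced above.
#
#     from transformers import AutoConfig, AutoTokenizer
#
#     config = AutoConfig.from_pretrained("facebook/blenderbot_small-90M")
#     onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")
#     print(onnx_config.inputs)  # input_ids / attention_mask / decoder_* axes
#     tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer)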
| 618 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(lowerCAmelCase_ )
@property
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(lowerCAmelCase_ )
@property
def __lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_a = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
_a = UnCLIPTextProjModel(**lowerCAmelCase_ )
return model
@property
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
_a = {
'''sample_size''': 32,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
_a = UNetaDConditionModel(**lowerCAmelCase_ )
return model
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
_a = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
torch.manual_seed(1 )
_a = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_a = self.dummy_decoder
_a = self.dummy_text_proj
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_super_res_first
_a = self.dummy_super_res_last
_a = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=10_00 , )
_a = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=10_00 , )
_a = CLIPImageProcessor(crop_size=32 , size=32 )
_a = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any]=0 , lowerCAmelCase_ : Dict=True ) -> Tuple:
"""simple docstring"""
_a = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
if str(lowerCAmelCase_ ).startswith('''mps''' ):
_a = torch.manual_seed(lowerCAmelCase_ )
else:
_a = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
if pil_image:
_a = input_image * 0.5 + 0.5
_a = input_image.clamp(0 , 1 )
_a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_a = DiffusionPipeline.numpy_to_pil(lowerCAmelCase_ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a = '''cpu'''
_a = self.get_dummy_components()
_a = self.pipeline_class(**lowerCAmelCase_ )
_a = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_a = self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_ )
_a = pipe(**lowerCAmelCase_ )
_a = output.images
_a = self.get_dummy_inputs(lowerCAmelCase_ , pil_image=lowerCAmelCase_ )
_a = pipe(
**lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_image_variation_input_image( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
        output = pipe(**pipeline_inputs )
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
        image_from_tuple = pipe(
            **tuple_pipeline_inputs , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_image_variation_input_list_images( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]
        output = pipe(**pipeline_inputs )
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]
        image_from_tuple = pipe(
            **tuple_pipeline_inputs , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_passed_image_embed( self ):
        device = torch.device("cpu" )

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(0 )
        dtype = pipe.decoder.dtype
        batch_size = 1
        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape , dtype=dtype , device=device , generator=generator , latents=None , scheduler=DummyScheduler() )
        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape , dtype=dtype , device=device , generator=generator , latents=None , scheduler=DummyScheduler() )
        pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
        img_out_1 = pipe(
            **pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents ).images
        pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image" )
        image_embeddings = pipe.image_encoder(image ).image_embeds
        img_out_2 = pipe(
            **pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents , image_embeddings=image_embeddings , ).images
        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2 ).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass( self ):
        test_max_difference = torch_device == "cpu"
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference , expected_max_diff=expected_max_diff )

    @skip_mps
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs , )

    def test_inference_batch_consistent( self ):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes , additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs , )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs )

    @skip_mps
    def test_dict_tuple_outputs_equivalent( self ):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local( self ):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components( self ):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo( self ):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipeline(
            input_image , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image , expected_image , 15 )
| 700 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size( self ):
        return 32

    @property
    def time_input_dim( self ):
        return 32

    @property
    def block_out_channels_a( self ):
        return self.time_input_dim

    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim( self ):
        return 100

    @property
    def dummy_tokenizer( self ):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer

    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(config )

    @property
    def dummy_image_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
        return CLIPVisionModelWithProjection(config )

    @property
    def dummy_text_proj( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs )
        return model

    @property
    def dummy_decoder( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_super_res_kwargs( self ):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first( self ):
        torch.manual_seed(0 )
        model = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    @property
    def dummy_super_res_last( self ):
        # seeded differently so that the "first" and "last" super-res UNets differ
        torch.manual_seed(1 )
        model = UNetaDModel(**self.dummy_super_res_kwargs )
        return model

    def get_dummy_components( self ):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1000 , )
        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1000 , )
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
        image_encoder = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            # MPS does not support device-bound generators, fall back to the global RNG
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
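    # --- Illustrative aside (not part of the original test file): a hedged
    # sketch of the [-1, 1] -> PIL conversion chain that get_dummy_inputs
    # performs above when pil_image=True. ---
    @staticmethod
    def _demo_tensor_to_pil():
        tensor = torch.rand(1, 3, 32, 32) * 2 - 1                  # NCHW in [-1, 1]
        tensor = (tensor * 0.5 + 0.5).clamp(0, 1)                  # -> [0, 1]
        array = tensor.cpu().permute(0, 2, 3, 1).float().numpy()   # -> NHWC
        return DiffusionPipeline.numpy_to_pil(array)[0]
    # --- end aside ---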
    def test_unclip_image_variation_input_tensor( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
        output = pipe(**pipeline_inputs )
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
        image_from_tuple = pipe(
            **tuple_pipeline_inputs , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_image_variation_input_image( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
        output = pipe(**pipeline_inputs )
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
        image_from_tuple = pipe(
            **tuple_pipeline_inputs , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_image_variation_input_list_images( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]
        output = pipe(**pipeline_inputs )
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]
        image_from_tuple = pipe(
            **tuple_pipeline_inputs , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_passed_image_embed( self ):
        device = torch.device("cpu" )

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(0 )
        dtype = pipe.decoder.dtype
        batch_size = 1
        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape , dtype=dtype , device=device , generator=generator , latents=None , scheduler=DummyScheduler() )
        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape , dtype=dtype , device=device , generator=generator , latents=None , scheduler=DummyScheduler() )
        pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
        img_out_1 = pipe(
            **pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents ).images
        pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image" )
        image_embeddings = pipe.image_encoder(image ).image_embeds
        img_out_2 = pipe(
            **pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents , image_embeddings=image_embeddings , ).images
        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2 ).max() < 1e-4
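    # --- Illustrative aside (not part of the original test file): a hedged
    # sketch of computing the CLIP image embeddings that the test above passes
    # via `image_embeddings`; the tiny config values mirror
    # dummy_image_encoder and are assumptions for illustration. ---
    @staticmethod
    def _demo_image_embeddings():
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=32, projection_dim=32, num_hidden_layers=2,
            num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1)
        encoder = CLIPVisionModelWithProjection(config)
        pixel_values = torch.rand(1, 3, 32, 32)
        return encoder(pixel_values).image_embeds  # shape (1, projection_dim)
    # --- end aside ---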
    @skip_mps
    def test_attention_slicing_forward_pass( self ):
        test_max_difference = torch_device == "cpu"
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference , expected_max_diff=expected_max_diff )

    @skip_mps
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs , )

    def test_inference_batch_consistent( self ):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes , additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs , )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs )

    @skip_mps
    def test_dict_tuple_outputs_equivalent( self ):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local( self ):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components( self ):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo( self ):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipeline(
            input_image , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image , expected_image , 15 )
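# --- Illustrative aside (not part of the original test file) ---
# A hedged usage sketch for the pipeline exercised above; it mirrors the slow
# test (same checkpoint, fp16, seed) but is packaged as a function so the
# module stays import-safe. Running it needs a CUDA GPU and downloads weights.
def _demo_unclip_image_variation():
    pipe = UnCLIPImageVariationPipeline.from_pretrained(
        "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png")
    generator = torch.Generator(device="cpu").manual_seed(0)
    return pipe(image, generator=generator, output_type="np").images[0]  # (256, 256, 3)
# --- end aside ---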
| 377 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
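# --- Illustrative aside (not part of the original file) ---
# Instantiating the configuration defined above and overriding hyperparameters;
# the 0.75/192 values mirror the `mobilenet_v1_0.75_192` checkpoint name and
# are assumptions chosen for illustration.
def _demo_mobilenet_v1_config():
    config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
    assert config.depth_multiplier == 0.75 and config.image_size == 192
    # depth_multiplier <= 0 would trigger the ValueError raised in __init__ above
    return config
# --- end aside ---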
| 122 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_a: int = """src/diffusers"""
# Matches is_xxx_available()
_a: Tuple = re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
_a: Any = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
_a: Union[str, Any] = """
{0} = None
"""
_a: Tuple = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
_a: Tuple = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for a dummy object."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
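# --- Illustrative aside (not part of the original script) ---
# What `create_dummy_object` renders for each kind of name, using made-up
# object names: constants become `NAME = None`, lowercase names a stub
# function, and everything else a DummyObject class that raises unless the
# backend is installed.
def _demo_dummy_objects():
    constant = create_dummy_object("FLAX_WEIGHTS_NAME", '["flax"]')
    function = create_dummy_object("load_textual_inversion", '["torch"]')
    klass = create_dummy_object("StableDiffusionPipeline", '["torch"]')
    assert "FLAX_WEIGHTS_NAME = None" in constant
    assert "requires_backends(load_textual_inversion" in function
    assert "metaclass=DummyObject" in klass
    return constant, function, klass
# --- end aside ---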
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()

    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
| 162 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowerCamelCase = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
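# --- Illustrative aside (not part of the original module) ---
# A hedged sketch of calling `load_and_quantize_model` above. The transformers
# model and the weights folder are placeholders, and a CUDA GPU plus
# bitsandbytes are required; treat this as a sketch, not a verified recipe.
def _demo_load_and_quantize(weights_location):
    from transformers import AutoConfig, AutoModelForCausalLM  # demo-only dependency

    config = AutoConfig.from_pretrained("facebook/opt-350m")  # placeholder checkpoint
    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(config)
    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    return load_and_quantize_model(
        empty_model,
        bnb_quantization_config=bnb_config,
        weights_location=weights_location,  # folder containing the saved weights
        device_map="auto",
    )
# --- end aside ---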
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            "The device_map was not initialized. "
            "Setting device_map to `{'':torch.cuda.current_device()}`."
        )

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace eligible `nn.Linear` modules with bitsandbytes quantized layers."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear`` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
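# --- Illustrative aside (not part of the original module) ---
# A self-contained toy version of the traversal above, swapping nn.Linear for
# nn.Identity so it runs without bitsandbytes; Identity is a stand-in only.
def _demo_replace_linears(model: nn.Module) -> nn.Module:
    for name, module in model.named_children():
        if isinstance(module, nn.Linear):
            setattr(model, name, nn.Identity())  # stand-in for a bnb layer
        elif len(list(module.children())) > 0:
            _demo_replace_linears(module)  # recurse into containers
    return model
# --- end aside ---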
def get_keys_to_not_convert(model):
    """Return module names (e.g. the lm_head) that should be kept in their original dtype."""
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layer."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any ):
if fpaa_statistics is None:
set_module_tensor_to_device(UpperCamelCase__ , UpperCamelCase__ , 0 , dtype=UpperCamelCase__ , value=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = param_name
SCREAMING_SNAKE_CASE__ = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase__ , UpperCamelCase__ )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ = new_module
SCREAMING_SNAKE_CASE__ = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE__ = False
offload_weight(module._parameters[tensor_name] , UpperCamelCase__ , UpperCamelCase__ , index=UpperCamelCase__ )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , UpperCamelCase__ , index=UpperCamelCase__ , )
else:
offload_weight(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , index=UpperCamelCase__ )
offload_weight(UpperCamelCase__ , param_name.replace("""weight""" , """SCB""" ) , UpperCamelCase__ , index=UpperCamelCase__ )
set_module_tensor_to_device(UpperCamelCase__ , UpperCamelCase__ , """meta""" , dtype=UpperCamelCase__ , value=torch.empty(*param.size() ) ) | 717 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
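# --- Illustrative aside (not part of the original module) ---
# The normalization performed by _preprocess_image above, in isolation:
# uint8 pixels in [0, 255] map to float tensors in [-1, 1].
def _demo_normalize_image():
    arr = np.random.randint(0, 256, (1, 64, 64, 3)).astype(np.float32) / 255.0
    arr = arr.transpose(0, 3, 1, 2)             # NHWC -> NCHW
    tensor = torch.from_numpy(2.0 * arr - 1.0)  # [0, 1] -> [-1, 1]
    assert float(tensor.min()) >= -1.0 and float(tensor.max()) <= 1.0
    return tensor
# --- end aside ---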
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
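# --- Illustrative aside (not part of the original module) ---
# _preprocess_mask above binarizes grayscale masks at 0.5 so RePaint gets a
# hard keep/inpaint decision per pixel; the same thresholding in isolation:
def _demo_binarize_mask():
    mask = np.array([0.1, 0.49, 0.5, 0.9], dtype=np.float32)
    mask[mask < 0.5] = 0
    mask[mask >= 0.5] = 1
    return mask  # -> [0., 0., 1., 1.]
# --- end aside ---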
class RePaintPipeline(DiffusionPipeline):
    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 59 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImgaImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006, )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image, ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
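# --- Illustrative aside (not part of the original test file) ---
# The test above deliberately feeds a 760x504 image: img2img only requires
# dimensions divisible by the VAE downscale factor (8), not by 16 or 32.
# The rounding rule in isolation:
def _demo_round_down_to_multiple(x: int, base: int = 8) -> int:
    return x - x % base
# 760 and 504 are already multiples of 8; 777 -> 776 and 505 -> 504.
# --- end aside ---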
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 24 |
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
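# --- Illustrative aside (not part of the original module) ---
# Worked example of the probe formula used above: searching 67 in the list
# below first probes index left + (item - a[left]) * (right - left) //
# (a[right] - a[left]) = (67 - 10) * 7 // (93 - 10) = 4; a[4] == 50 < 67, so
# the search window then moves right.
def _demo_probe_point():
    a = [10, 30, 40, 45, 50, 66, 77, 93]
    left, right, item = 0, len(a) - 1, 67
    point = left + ((item - a[left]) * (right - left)) // (a[right] - a[left])
    assert point == 4 and a[point] == 50
    return point
# --- end aside ---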
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of interpolation search over `sorted_collection[left:right + 1]`."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raise ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 556 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Optional[Any] = """cpu"""
lowerCAmelCase_ :Any = self.get_dummy_components()
lowerCAmelCase_ :int = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ :List[Any] = self.get_dummy_inputs(_a )
lowerCAmelCase_ :List[str] = pipe(**_a ).images
lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
lowerCAmelCase_ :Union[str, Any] = np.array(
[1.0_00E00, 5.7_17E-01, 4.7_17E-01, 1.0_00E00, 0.0_00E00, 1.0_00E00, 3.0_00E-04, 0.0_00E00, 9.0_00E-04] )
lowerCAmelCase_ :Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> str:
super().test_save_load_local(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = """google/ddpm-cifar10-32"""
lowerCAmelCase_ :Optional[int] = UNetaDModel.from_pretrained(_a )
lowerCAmelCase_ :Tuple = DDIMScheduler()
lowerCAmelCase_ :str = DDIMPipeline(unet=_a , scheduler=_a )
ddim.to(_a )
ddim.set_progress_bar_config(disable=_a )
lowerCAmelCase_ :int = torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = ddim(generator=_a , eta=0.0 , output_type="""numpy""" ).images
lowerCAmelCase_ :Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ :Tuple = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :int = """google/ddpm-ema-bedroom-256"""
lowerCAmelCase_ :Any = UNetaDModel.from_pretrained(_a )
lowerCAmelCase_ :str = DDIMScheduler.from_pretrained(_a )
lowerCAmelCase_ :Optional[Any] = DDIMPipeline(unet=_a , scheduler=_a )
ddpm.to(_a )
ddpm.set_progress_bar_config(disable=_a )
lowerCAmelCase_ :Dict = torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = ddpm(generator=_a , output_type="""numpy""" ).images
lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase_ :Tuple = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 710 |
"""simple docstring"""
import numpy as np
def _snake_case ( lowercase__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def _snake_case ( lowercase__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
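# Quick numeric sanity check (illustrative only, not part of the original
# module): swish stays below the identity for negative inputs and approaches
# it for large positive ones.
# >>> swish(np.array([-1.0, 0.0, 1.0]))
# array([-0.26894142,  0.        ,  0.73105858])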
| 256 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
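# Illustrative usage sketch (assumed, not part of the original module):
#   config = LongformerConfig(attention_window=256)
#   onnx_config = LongformerOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ["input_ids", "attention_mask", "global_attention_mask"]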
| 49 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 327 | 0 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # student (DistilBERT) parameter names on the left, teacher (BERT) names on the right
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
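# Example invocation (illustrative; the output path is a placeholder, not a
# value from the original script):
#   python extract.py --model_type bert --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/distilbert_init.pth --vocab_transform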
| 526 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
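# Note (illustrative): with this pattern, `import transformers.models.mobilevit`
# stays cheap; a heavyweight submodule such as `modeling_mobilevit` is only
# imported the first time one of its attributes is looked up, e.g.
#   from transformers.models.mobilevit import MobileViTModel  # triggers the real import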
| 526 | 1 |
"""Logging utilities for the `datasets` library."""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name; defaults to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current verbosity level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to INFO."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to WARNING."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to DEBUG."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to ERROR."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
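# Usage sketch (illustrative, not part of the original module): consumers
# typically configure verbosity and fetch a namespaced logger, e.g.
#   from datasets.utils.logging import get_logger, set_verbosity_info
#   set_verbosity_info()
#   logger = get_logger(__name__)
#   logger.info("loading dataset...")
# and wrap loops with the module-level `tqdm`, which silently degrades to
# EmptyTqdm when progress bars are disabled.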
| 502 |
"""Tests for the object-detection pipeline."""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
| 502 | 1 |
"""Tests for the TensorFlow Funnel Transformer models."""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,  # Set to a smaller value, so we can keep the small error threshold (1e-5) in the test
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 640 |
"""Odd-even transposition sort (brick sort)."""
def odd_even_sort(input_list: list) -> list:
    """Sort the list in place using odd-even transposition sort and return it."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
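# Illustrative note (not part of the original module): odd-even transposition
# sort is a parallel-friendly variant of bubble sort; on a single thread it
# still performs O(n^2) comparisons in the worst case.
# >>> odd_even_sort([5, 1, 4, 2, 3])
# [1, 2, 3, 4, 5]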
| 640 | 1 |
"""RSA key-pair generator: writes a public and a private key file."""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
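# Illustrative round-trip (assumed usage, not part of the original script):
# with public key (n, e) and private key (n, d), textbook RSA gives
#   cipher = pow(message, e, n)
#   assert pow(cipher, d, n) == message  # for any 0 <= message < n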
| 577 |
"""Tests for the Flax diffusion pipelines."""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
| 577 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 189 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
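# The try/except blocks above implement diffusers-style soft dependencies: when
# a backend is missing, the real schedulers are replaced by dummy objects from
# dummy_*_objects that raise an informative ImportError only when instantiated,
# e.g. (a sketch, assuming this module is diffusers.schedulers):
#
#     from diffusers.schedulers import DPMSolverSDEScheduler
#     scheduler = DPMSolverSDEScheduler()  # raises if torchsde is not installed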
| 189 | 1 |
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file into a dict mapping each node to a list of
    [neighbour, distance] pairs (edges are treated as symmetric)."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting (and ending) at the first
    node named in the file, returning the tour and its total distance."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate the 2-swap neighbourhood of a tour; each neighbour carries its
    total distance as the last list element, sorted ascending by distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list of length `size`,
    returning the best tour found and its cost."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
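# Worked example (a sketch; the whitespace-separated "node node distance"
# edge-list format with single-character node names is inferred from the
# parsing code above, not documented by it). With a file graph.txt containing:
#
#     a b 20
#     a c 18
#     b c 10
#
# the pipeline can also be driven without argparse:
#
#     neighbours = generate_neighbours("graph.txt")
#     first_solution, distance = generate_first_solution("graph.txt", neighbours)
#     best_solution, best_cost = tabu_search(first_solution, distance, neighbours, 5, 3)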
| 696 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi by dart-throwing into the unit square."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
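# Usage sketch: all three demos are driven purely by the sample count, and the
# Monte Carlo error shrinks roughly like 1/sqrt(iterations):
#
#     pi_estimator(10_000)                        # crude estimate of pi
#     area_under_line_estimator_check(10_000)     # integral of y=x over [0, 1], expected 0.5
#     pi_estimator_using_area_under_curve(10_000)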
| 21 | 0 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
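# For reference, a minimal sketch of the predicate under test, assuming it
# mirrors the gating the assertions above encode (both a known dataset size and
# a nonzero IN_MEMORY_MAX_SIZE are required):
#
#     def is_small_dataset(dataset_size):
#         if dataset_size and datasets.config.IN_MEMORY_MAX_SIZE:
#             return dataset_size < datasets.config.IN_MEMORY_MAX_SIZE
#         return False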
| 702 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
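# Usage sketch (the repo id and file path are hypothetical):
#
#     hf_hub_url("user/my_dataset", "data/train.csv", revision="main")
#     # -> "https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv"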
| 173 | 0 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
A = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=True ) -> Optional[Any]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__UpperCAmelCase : List[str] = cached_file(UpperCamelCase , UpperCamelCase , force_download=not use_cached_models )
__UpperCAmelCase : Tuple = config_class.from_json_file(UpperCamelCase )
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : str = True
print(f"Building TensorFlow model from configuration: {config}" )
__UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__UpperCAmelCase : int = cached_file(
UpperCamelCase , UpperCamelCase , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__UpperCAmelCase : Union[str, Any] = load_pytorch_checkpoint_in_tfa_model(UpperCamelCase , UpperCamelCase )
if compare_with_pt_model:
__UpperCAmelCase : int = tf_model(tf_model.dummy_inputs , training=UpperCamelCase ) # build the network
__UpperCAmelCase : str = torch.load(UpperCamelCase , map_location="cpu" )
__UpperCAmelCase : List[Any] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=UpperCamelCase , config=UpperCamelCase , state_dict=UpperCamelCase )
with torch.no_grad():
__UpperCAmelCase : Dict = pt_model(**pt_model.dummy_inputs )
__UpperCAmelCase : Optional[int] = pto[0].numpy()
__UpperCAmelCase : List[str] = tfo[0].numpy()
__UpperCAmelCase : List[Any] = np.amax(np.abs(np_pt - np_tf ) )
print(f"Max absolute difference between models outputs {diff}" )
assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"
# Save pytorch-model
print(f"Save TensorFlow model to {tf_dump_path}" )
tf_model.save_weights(UpperCamelCase , save_format="h5" )
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False , ) -> Any:
"""simple docstring"""
if args_model_type is None:
__UpperCAmelCase : Dict = list(MODEL_CLASSES.keys() )
else:
__UpperCAmelCase : List[Any] = [args_model_type]
for j, model_type in enumerate(UpperCamelCase , start=1 ):
print("=" * 100 )
print(f" Converting model type {j}/{len(UpperCamelCase )}: {model_type}" )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__UpperCAmelCase : List[Any] = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__UpperCAmelCase : int = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(UpperCamelCase , UpperCamelCase ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f" Skipping finetuned checkpoint {model_shortcut_name}" )
continue
__UpperCAmelCase : Optional[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(f" Skipping not finetuned checkpoint {model_shortcut_name}" )
continue
print(
f" Converting checkpoint {i}/{len(UpperCamelCase )}: {model_shortcut_name} - model_type {model_type}" )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
__UpperCAmelCase : Dict = cached_file(UpperCamelCase , UpperCamelCase , force_download=not use_cached_models )
else:
__UpperCAmelCase : Optional[Any] = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__UpperCAmelCase : int = cached_file(UpperCamelCase , UpperCamelCase , force_download=not use_cached_models )
else:
__UpperCAmelCase : List[str] = model_shortcut_name
if os.path.isfile(UpperCamelCase ):
__UpperCAmelCase : Tuple = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=UpperCamelCase , pytorch_checkpoint_path=UpperCamelCase , config_file=UpperCamelCase , tf_dump_path=os.path.join(UpperCamelCase , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=UpperCamelCase , )
if remove_cached_files:
os.remove(UpperCamelCase )
os.remove(UpperCamelCase )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
A = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 77 |
from __future__ import annotations
import queue
class TreeNode:
    """Node of a binary tree, holding an integer value and optional children."""

    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
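# Sample session (sketch): entering 1, 2, 3 and then "n" builds
#
#         1
#        / \
#       2   3
#
# for which pre_order prints "1,2,3," and in_order prints "2,1,3,".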
| 191 | 0 |
__all__ = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 161 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 161 | 1 |
def euclidean_gcd(a: int, b: int) -> int:
    """Return the GCD of a and b via the iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Return the GCD of a and b via the recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main()
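# Both variants rely on the invariant gcd(a, b) == gcd(b, a % b) and terminate
# because the second argument strictly decreases, e.g.
# (48, 18) -> (18, 12) -> (12, 6) -> (6, 0), giving gcd 6.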
| 333 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Interface every CLI subcommand implements: a static hook that registers
    its argparse subparser and an instance method that executes the command."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
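# A minimal concrete command following the pattern above; the "echo" command
# and its flag are illustrative, not part of any real CLI:
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo", help="Print a string and exit.")
        echo_parser.add_argument("--text", type=str, default="hello")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)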
| 333 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self):
        return 1e-4
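# Quick sanity check (a sketch):
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     onnx_config = MobileNetV1OnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch'})])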
| 156 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34_000,))
        audio2 = np.zeros((14_000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8_000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8_000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
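# Outside the test harness the same checkpoint can be exercised directly
# (a sketch; this downloads the tiny random model used in test_small_model_pt):
#
#     classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
#     print(classifier(np.zeros((16_000,)), top_k=2))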
| 156 | 1 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Decorator that runs accelerate's pre-forward hook (if one is attached to
    the module) before the decorated method, so offloaded weights are moved to
    the right device first."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
| 407 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 407 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
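# Invocation sketch (the script name and paths are placeholders):
#
#     python convert_rembert_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --rembert_config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin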
| 530 |
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    return sum(int(digit) for digit in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
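# Worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so
# solution(10) == 27.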
| 530 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
        'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'YolosForObjectDetection',
        'YolosModel',
        'YolosPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 153 |
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
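# Worked example: for n = 10 the sum of the squares is 385 and the square of
# the sum is 55**2 = 3025, so solution(10) == 3025 - 385 == 2640.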
| 490 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Build a circuit that measures a freshly initialised qubit and return the
    counts histogram."""
    simulator = qiskit.Aer.get_backend('aer_simulator')

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])

    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
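# A circuit built this way measures qubits still in the initial |0> state, so
# every shot collapses to the all-zeros outcome:
#
#     single_qubit_measure(1, 1)  # -> {'0': 1000}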
if __name__ == "__main__":
    print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 93 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase_ = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ = "RegNetConfig"
# Base docstring
UpperCAmelCase_ = "facebook/regnet-y-040"
UpperCAmelCase_ = [1, 10_88, 7, 7]
# Image classification docstring
UpperCAmelCase_ = "facebook/regnet-y-040"
UpperCAmelCase_ = "tabby, tabby cat"
UpperCAmelCase_ = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 3 , _UpperCamelCase = 1 , _UpperCamelCase = 1 , _UpperCamelCase = "relu" , ):
super().__init__()
_UpperCAmelCase = nn.Convad(
_UpperCamelCase , _UpperCamelCase , kernel_size=_UpperCamelCase , stride=_UpperCamelCase , padding=kernel_size // 2 , groups=_UpperCamelCase , bias=_UpperCamelCase , )
_UpperCAmelCase = nn.BatchNormad(_UpperCamelCase )
_UpperCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = self.convolution(_UpperCamelCase )
_UpperCAmelCase = self.normalization(_UpperCamelCase )
_UpperCAmelCase = self.activation(_UpperCamelCase )
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase ):
super().__init__()
_UpperCAmelCase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
_UpperCAmelCase = config.num_channels
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
_UpperCAmelCase = self.embedder(_UpperCamelCase )
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 2 ):
super().__init__()
_UpperCAmelCase = nn.Convad(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , stride=_UpperCamelCase , bias=_UpperCamelCase )
_UpperCAmelCase = nn.BatchNormad(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = self.convolution(_UpperCamelCase )
_UpperCAmelCase = self.normalization(_UpperCamelCase )
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase ):
super().__init__()
_UpperCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
_UpperCAmelCase = nn.Sequential(
nn.Convad(_UpperCamelCase , _UpperCamelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(_UpperCamelCase , _UpperCamelCase , kernel_size=1 ) , nn.Sigmoid() , )
def UpperCamelCase( self , _UpperCamelCase ):
# b c h w -> b c 1 1
_UpperCAmelCase = self.pooler(_UpperCamelCase )
_UpperCAmelCase = self.attention(_UpperCamelCase )
_UpperCAmelCase = hidden_state * attention
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 ):
super().__init__()
_UpperCAmelCase = in_channels != out_channels or stride != 1
_UpperCAmelCase = max(1 , out_channels // config.groups_width )
_UpperCAmelCase = (
RegNetShortCut(_UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase ) if should_apply_shortcut else nn.Identity()
)
_UpperCAmelCase = nn.Sequential(
RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase , groups=_UpperCamelCase , activation=config.hidden_act ) , RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , activation=_UpperCamelCase ) , )
_UpperCAmelCase = ACTaFN[config.hidden_act]
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = hidden_state
_UpperCAmelCase = self.layer(_UpperCamelCase )
_UpperCAmelCase = self.shortcut(_UpperCamelCase )
hidden_state += residual
_UpperCAmelCase = self.activation(_UpperCamelCase )
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 ):
super().__init__()
_UpperCAmelCase = in_channels != out_channels or stride != 1
_UpperCAmelCase = max(1 , out_channels // config.groups_width )
_UpperCAmelCase = (
RegNetShortCut(_UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase ) if should_apply_shortcut else nn.Identity()
)
_UpperCAmelCase = nn.Sequential(
RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase , groups=_UpperCamelCase , activation=config.hidden_act ) , RegNetSELayer(_UpperCamelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_UpperCamelCase , _UpperCamelCase , kernel_size=1 , activation=_UpperCamelCase ) , )
_UpperCAmelCase = ACTaFN[config.hidden_act]
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = hidden_state
_UpperCAmelCase = self.layer(_UpperCamelCase )
_UpperCAmelCase = self.shortcut(_UpperCamelCase )
hidden_state += residual
_UpperCAmelCase = self.activation(_UpperCamelCase )
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 2 , _UpperCamelCase = 2 , ):
super().__init__()
_UpperCAmelCase = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
_UpperCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase , ) , *[layer(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for _ in range(depth - 1 )] , )
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = self.layers(_UpperCamelCase )
return hidden_state
class __UpperCamelCase ( nn.Module ):
def __init__( self , _UpperCamelCase ):
super().__init__()
_UpperCAmelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_UpperCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_UpperCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_UpperCamelCase , config.depths[1:] ):
self.stages.append(RegNetStage(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , depth=_UpperCamelCase ) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True ):
_UpperCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCAmelCase = hidden_states + (hidden_state,)
_UpperCAmelCase = stage_module(_UpperCamelCase )
if output_hidden_states:
_UpperCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCamelCase , hidden_states=_UpperCamelCase )
class __UpperCamelCase ( A__ ):
__A : List[str] = RegNetConfig
__A : Any = """regnet"""
__A : Optional[int] = """pixel_values"""
__A : Any = True
def UpperCamelCase( self , _UpperCamelCase ):
if isinstance(_UpperCamelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(_UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=False ):
if isinstance(_UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = value
UpperCAmelCase_ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , A__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __UpperCamelCase ( A__ ):
def __init__( self , _UpperCamelCase ):
super().__init__(_UpperCamelCase )
_UpperCAmelCase = config
_UpperCAmelCase = RegNetEmbeddings(_UpperCamelCase )
_UpperCAmelCase = RegNetEncoder(_UpperCamelCase )
_UpperCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None ):
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.embedder(_UpperCamelCase )
_UpperCAmelCase = self.encoder(
_UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase )
_UpperCAmelCase = encoder_outputs[0]
_UpperCAmelCase = self.pooler(_UpperCamelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCamelCase , pooler_output=_UpperCamelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification( RegNetPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            # infer the problem type from the label dtype the first time labels are seen
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """BridgeTowerImageProcessor"""
    tokenizer_class = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
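# Minimal usage sketch (illustrative, not part of the original file; the checkpoint
# name is an assumption - substitute whichever BridgeTower checkpoint you use):
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# inputs = processor(images=image, text="a photo of two cats", return_tensors="pt")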
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
super().setUp()
        vocab_tokens = [
"<unk>",
"[CLS]",
"[SEP]",
"want",
"unwanted",
"wa",
"un",
"running",
",",
"low",
"l",
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer( self ,**kwargs ):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file ,lower_case=True )
        tokens = tokenizer.tokenize("<unk> UNwanted , running" )
        self.assertListEqual(tokens ,["<unk>", "unwanted", ",", "running"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[0, 4, 8, 7] )
    def test_full_tokenizer_lower( self ):
        tokenizer = TransfoXLTokenizer(lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ?  " ) ,["hello", "!", "how", "are", "you", "?"] )
    def test_full_tokenizer_no_lower( self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ?  " ) ,["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def test_full_tokenizer_moses_numbers( self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
"Hello",
"(",
"bracket",
")",
"and",
"side",
"@-@",
"scrolled",
"[",
"and",
"]",
"Henry",
"'s",
"$",
"5",
"@,@",
"000",
"with",
"3",
"@.@",
"34",
"m",
".",
"What",
"'s",
"up",
"!",
"?",
]
        self.assertListEqual(tokenizer.tokenize(text_in ) ,tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) ,text_in )
    def test_move_added_token( self ):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
tokenizer.add_tokens(["new1", "new2"] )
tokenizer.move_added_token("new1" ,1 )
# Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) ,original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("new1" ) ,[1] )
self.assertEqual(tokenizer.decode([1] ) ,"new1" )
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
_lowerCamelCase = """"""
_lowerCamelCase = """"""
_lowerCamelCase = """"""
_lowerCamelCase = """"""
def get_all_tweets(screen_name: str) -> None:
    # Note: the user_timeline endpoint only exposes roughly the most recent
    # 3200 tweets of an account, so older history cannot be fetched this way.
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
def lucas_lehmer_test(p: int) -> bool:
    # Lucas-Lehmer primality test: returns True iff the Mersenne number 2**p - 1 is prime.
    if p < 2:
        raise ValueError('''p should not be less than 2!''' )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
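# Hand-checked sanity note: for p in range(2, 32), lucas_lehmer_test(p) is True
# exactly for p in (2, 3, 5, 7, 13, 17, 19, 31), the Mersenne prime exponents <= 31,
# which is consistent with the demo below (7 -> True, 11 -> False since 2047 = 23 * 89).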
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)
    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)
    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
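# Example value, checked by hand against the formula above (one matched "l",
# zero transpositions, empty common prefix):
# jaro_winkler("hello", "world") == 0.4666666666666666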
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
        AcceleratorState._reset_state()
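        # _reset_state() clears the process-global AcceleratorState singleton so this
        # test does not leak accelerator configuration into tests that run after it.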
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stand-in so the references below resolve when vision deps are missing."""
        @staticmethod
        def open(*args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase ):
    @require_torch
    def test_small_model_pt(self ):
        image_classifier = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
            nested_simplify(output ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
        output = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.333, '''label''': ANY(str )},
                    {'''score''': 0.333, '''label''': ANY(str )},
                    {'''score''': 0.333, '''label''': ANY(str )},
                ],
            ]
            * 5 , )
    @require_tf
    def test_small_model_tf(self ):
        image_classifier = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''a''', '''b''', '''c'''] )
        self.assertEqual(
            nested_simplify(output ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
        output = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.333, '''label''': ANY(str )},
                    {'''score''': 0.333, '''label''': ANY(str )},
                    {'''score''': 0.333, '''label''': ANY(str )},
                ],
            ]
            * 5 , )
    @slow
    @require_torch
    def test_large_model_pt(self ):
        image_classifier = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'''score''': 0.511, '''label''': '''remote'''},
                {'''score''': 0.485, '''label''': '''cat'''},
                {'''score''': 0.004, '''label''': '''plane'''},
            ] , )
        output = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.511, '''label''': '''remote'''},
                    {'''score''': 0.485, '''label''': '''cat'''},
                    {'''score''': 0.004, '''label''': '''plane'''},
                ],
            ]
            * 5 , )
    @slow
    @require_tf
    def test_large_model_tf(self ):
        image_classifier = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        output = image_classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'''score''': 0.511, '''label''': '''remote'''},
                {'''score''': 0.485, '''label''': '''cat'''},
                {'''score''': 0.004, '''label''': '''plane'''},
            ] , )
        output = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'''score''': 0.511, '''label''': '''remote'''},
                    {'''score''': 0.485, '''label''': '''cat'''},
                    {'''score''': 0.004, '''label''': '''plane'''},
                ],
            ]
            * 5 , )
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, mobilebert_config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(F"Building PyTorch model from configuration: {config}" )
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
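# Example invocation (the file paths are illustrative placeholders, not real checkpoints):
# python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./mobilebert/mobilebert.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./pytorch_model.bin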
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
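# The _LazyModule registered above defers the torch/TF submodule imports until an
# attribute is first accessed, so `import transformers` stays cheap and the optional
# backends are only required when a DeiT class is actually used.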
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = """main"""
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = """aaaaaaa"""
# This commit does not exist, so we should 404.

PINNED_SHA1 = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def context_fr():
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class TestImportMechanisms(unittest.TestCase ):
    def test_module_spec_available(self ):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("""transformers""" ) is not None
class GenericUtilTests(unittest.TestCase ):
    @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def test_context_managers_no_context(self , mock_stdout ):
'''simple docstring'''
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def test_context_managers_one_context(self , mock_stdout ):
'''simple docstring'''
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def test_context_managers_two_context(self , mock_stdout ):
'''simple docstring'''
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
    @require_torch
    def test_find_labels_pt(self ):
        self.assertEqual(find_labels(BertForSequenceClassification ) , ["""labels"""] )
        self.assertEqual(find_labels(BertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )

        class DummyModel(BertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
    @require_tf
    def test_find_labels_tf(self ):
        self.assertEqual(find_labels(TFBertForSequenceClassification ) , ["""labels"""] )
        self.assertEqual(find_labels(TFBertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )

        class DummyModel(TFBertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
    @require_flax
    def test_find_labels_flax(self ):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )

        class DummyModel(FlaxBertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , [] )
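# find_labels works by inspecting the signature of the model class's forward (or
# call) method and returning the label-like argument names, which is why the Flax
# classes, whose __call__ accepts no label arguments, return an empty list.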
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name ):
    if "cls_token" in name:
        name = name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
    if "mask_token" in name:
        name = name.replace("""mask_token""" , """decoder.mask_token""" )
    if "decoder_pos_embed" in name:
        name = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
    if "decoder_blocks" in name:
        name = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """vit.encoder.layer""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "decoder_embed" in name:
        name = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
    if "decoder_norm" in name:
        name = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
    if "decoder_pred" in name:
        name = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("""norm.weight""" , """vit.layernorm.weight""" )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("""norm.bias""" , """vit.layernorm.bias""" )
    return name
def convert_state_dict(orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value weights
            key_split = key.split(""".""" )
            layer_num = int(key_split[1] )
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = """decoder.decoder_layers."""
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = """vit.encoder.layer."""
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""model"""]
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    url = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    inputs = image_processor(images=image , return_tensors="""pt""" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1e-4 )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
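# Example invocation (the script file name is illustrative; adjust to your checkout):
# python convert_vit_mae_to_pytorch.py \
#     --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#     --pytorch_dump_folder_path ./vit-mae-base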
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=False , vocab_size=19 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        config = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=True , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , )
        return config
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmForProteinFolding(config=config ).float()
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp( self ):
        self.model_tester = EsmFoldModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip("""Does not support attention outputs""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold only has one output format.""" )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus ):
    @slow
    def test_inference_protein_folding( self ):
        model = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        position_outputs = model(input_ids )["""positions"""]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.float32 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1e-4 ) )
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config( self ):
        return TaConfig.from_pretrained('''google/umt5-base''' )
    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids )
        return config, input_dict
    def prepare_config_and_inputs_for_common( self ):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config( self ):
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        model = UMTaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def create_and_check_model_fp16_forward( self , config , input_dict , ):
        model = UMTaModel(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMTaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': UMTaForConditionalGeneration,
            '''feature-extraction''': UMTaModel,
            '''summarization''': UMTaForConditionalGeneration,
            '''text2text-generation''': UMTaForConditionalGeneration,
            '''translation''': UMTaForConditionalGeneration,
            '''question-answering''': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp( self ):
        self.model_tester = UMTaModelTester(self )
    @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def test_export_to_onnx( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=True , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
    @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def test_model_fp16_forward( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs )
    def test_with_head_masking( self ):
        attention_names = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['''decoder_head_mask'''] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
    @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def test_disk_offload( self ):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase ):
    @slow
    @unittest.skip(
        '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test( self ):
        model = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=False , legacy=False )
        input_text = [
            '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
            '''No se como puedo <extra_id_0>.''',
            '''This is the reason why we <extra_id_0> them.''',
            '''The <extra_id_0> walks in <extra_id_1>, seats''',
            '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
        ]
        input_ids = tokenizer(input_text , return_tensors='''pt''' , padding=True ).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 467 | 0 |
"""simple docstring"""
def is_palindrome_number(num: int) -> bool:
    """Return True if the decimal digits of `num` read the same forwards and backwards."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        # Peel off the last digit of num and append it to the reversed number.
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
    import doctest

    doctest.testmod()
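    # Examples: 121 reverses to itself, 123 reverses to 321, and negatives are rejected outright.
    assert is_palindrome_number(121)
    assert not is_palindrome_number(123)
    assert not is_palindrome_number(-121)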
| 491 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names match the corresponding model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(examples: List[InputExample], label_list: List[str], max_seq_length: int, tokenizer: PreTrainedTokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100, sequence_a_segment_id=0, mask_padding_with_zero=True) -> List[InputFeatures]:
        """Convert a list of InputExamples into model-ready InputFeatures."""
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0      0   0    1  1  1  1  1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only real label ids contribute to the loss later

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self) -> int:
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
    import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only real label ids contribute to the loss later

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
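
# A minimal sketch (hypothetical tokenizer output and label ids) of the subword label-alignment
# rule implemented in convert_examples_to_features above: only the first subword of each word
# keeps the real label id, and the remaining subwords get pad_token_label_id so the loss skips them.
if __name__ == "__main__":
    word_tokens = ["Johan", "##son"]  # what a WordPiece tokenizer might return for one word
    label_map = {"B-PER": 1}
    pad_token_label_id = -100  # matches nn.CrossEntropyLoss().ignore_index
    label_ids = [label_map["B-PER"]] + [pad_token_label_id] * (len(word_tokens) - 1)
    assert label_ids == [1, -100]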
| 491 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve Z**2 = R**2 + X**2 for whichever of resistance (R), reactance (X) or
    impedance (Z) was passed in as 0, given the other two."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
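    # Example (Z**2 = R**2 + X**2): a 3-ohm resistance with a 4-ohm reactance gives a 5-ohm impedance.
    print(electrical_impedance(resistance=3, reactance=4, impedance=0))  # {'impedance': 5.0}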
| 274 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
    main()
| 188 | 0 |
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        """Kadane's algorithm: return the maximum sum over all contiguous sub-arrays."""
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            # Best sum of a sub-array ending at i: either extend the previous run or start fresh at i.
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            # Best sum seen anywhere up to index i.
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
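    # Example: for "1,2,-5,3,4" the best contiguous run is [3, 4], so the maximum sum is 7.
    assert SubArray("1,2,-5,3,4").solve_sub_array() == 7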
| 3 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"camembert-base": 512,
}
SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
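
# A minimal usage sketch (requires network access; "camembert-base" is the real checkpoint name):
#   tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#   tokenizer("J'aime le camembert !")["input_ids"]  # ids wrapped as <s> ... </s> by the methods above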
| 3 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    """Parse common truthy/falsy strings into booleans for argparse."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map each argument string back to the original (possibly non-string) choice object."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(*, aliases: Union[str, List[str]] = None, help: str = None, default: Any = dataclasses.MISSING, default_factory: Callable[[], Any] = dataclasses.MISSING, metadata: dict = None, **kwargs) -> dataclasses.Field:
    """Convenience wrapper around `dataclasses.field` that stores aliases/help in the field metadata."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """An `argparse.ArgumentParser` that creates arguments from the fields of dataclass types."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )
        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]
        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]
            kwargs["type"] = make_choice_type_function(kwargs["choices"])
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None) -> Tuple[DataClass, ...]:
        """Parse command-line arguments (plus optional ".args" files) into instances of the dataclass types."""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
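
# A minimal usage sketch (hypothetical dataclass): HfArgumentParser turns each dataclass field into
# an argparse option, including the string_to_bool coercion and the implicit `--no_*` complements.
if __name__ == "__main__":
    @dataclasses.dataclass
    class _DemoArguments:
        learning_rate: float = 3e-5
        do_train: bool = False

    _demo_parser = HfArgumentParser(_DemoArguments)
    (_demo_args,) = _demo_parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--do_train"])
    assert _demo_args.learning_rate == 1e-4 and _demo_args.do_train is True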
| 136 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
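
# Example: re-check a single pinned dependency at runtime, with a custom hint shown on mismatch:
#   dep_version_check("tqdm", hint="Try: pip install -U tqdm")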
| 136 | 1 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
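
# Example (values are illustrative): http_user_agent({"pipeline_class": "StableDiffusionPipeline"})
# yields something like "diffusers/<version>; python/3.10.4; session_id/<hex>; torch/2.0.1; pipeline_class/StableDiffusionPipeline".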
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None, adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None, adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None, ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None, mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
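
# Example (hypothetical cache path): for a resolved file like
#   ".../models--runwayml--stable-diffusion-v1-5/snapshots/<40-hex-chars>/unet/config.json"
# the function returns the 40-character snapshot folder name; anything that does not look like a
# full commit hash (per REGEX_COMMIT_HASH) yields None.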
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
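
# Example: _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin";
# with variant=None the name is returned unchanged.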
def _get_model_file(pretrained_model_name_or_path, *, weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
            )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
| 509 |
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TF console noise; assignment target restored from the upstream print_env script
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
| 509 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 115 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
    import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpainting_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None,
            feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5,
            num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler,
            safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5,
            num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 115 | 1 |
'''simple docstring'''
import requests
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> None:
'''simple docstring'''
snake_case_ = {'''Content-Type''': '''application/json'''}
snake_case_ = requests.post(SCREAMING_SNAKE_CASE_, json={'''text''': message_body}, headers=SCREAMING_SNAKE_CASE_ )
if response.status_code != 200:
snake_case_ = (
'''Request to slack returned an error '''
F"{response.status_code}, the response is:\n{response.text}"
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 700 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
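
# A minimal layout note for the methods above: XLNet places the classification token last, so a
# single sequence becomes "A <sep> <cls>" with token type ids "0 ... 0 2" (SEG_ID_CLS marks <cls>),
# unlike BERT's leading [CLS].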
| 593 | 0 |
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the given side lengths can close into a polygon: the longest side
    must be strictly shorter than the sum of all the other sides."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
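    # Example: sides (3, 4, 5) can close into a polygon, while (1, 1, 3) cannot since 3 >= 1 + 1.
    assert check_polygon([3, 4, 5])
    assert not check_polygon([1, 1, 3])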
| 53 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(self, vocab_size=30524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act="gelu", layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Any = """blip_vision_model"""
def __init__( self , UpperCamelCase__=768 , UpperCamelCase__=3072 , UpperCamelCase__=512 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=384 , UpperCamelCase__=16 , UpperCamelCase__="gelu" , UpperCamelCase__=1e-5 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-10 , **UpperCamelCase__ , ) -> Any:
super().__init__(**UpperCamelCase__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : List[Any] = projection_dim
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : List[Any] = num_attention_heads
lowerCamelCase : int = patch_size
lowerCamelCase : Optional[Any] = image_size
lowerCamelCase : int = initializer_range
lowerCamelCase : Optional[int] = attention_dropout
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : Optional[Any] = hidden_act
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
lowerCamelCase : str = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : int = """blip"""
lowerCamelCase_ : List[Any] = True
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=512 , UpperCamelCase__=2.6592 , UpperCamelCase__=256 , **UpperCamelCase__ , ) -> Optional[int]:
super().__init__(**UpperCamelCase__ )
if text_config is None:
lowerCamelCase : Union[str, Any] = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
lowerCamelCase : int = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
lowerCamelCase : int = BlipTextConfig(**UpperCamelCase__ )
lowerCamelCase : Tuple = BlipVisionConfig(**UpperCamelCase__ )
lowerCamelCase : Optional[int] = self.vision_config.hidden_size
lowerCamelCase : Tuple = projection_dim
lowerCamelCase : List[Any] = logit_scale_init_value
lowerCamelCase : str = 1.0
lowerCamelCase : Optional[int] = 0.02
lowerCamelCase : Dict = image_text_hidden_size
@classmethod
def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Dict = copy.deepcopy(self.__dict__ )
lowerCamelCase : str = self.text_config.to_dict()
lowerCamelCase : str = self.vision_config.to_dict()
lowerCamelCase : Any = self.__class__.model_type
return output
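
# A minimal usage sketch (an addition, not part of the original module): build a
# composite BlipConfig from explicit sub-configs and check that the text model's
# cross-attention width is tied to the vision hidden size, as done in __init__.
if __name__ == "__main__":
    text_config = BlipTextConfig(hidden_size=768)
    vision_config = BlipVisionConfig(hidden_size=768)
    config = BlipConfig.from_text_vision_configs(text_config, vision_config)
    assert config.text_config.encoder_hidden_size == config.vision_config.hidden_size
    assert config.to_dict()["model_type"] == "blip"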
| 311 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    # Relax every edge leaving v; update tentative costs and parents, and track
    # the best forward+backward meeting distance found so far.
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
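    # Usage sketch on the module-level graphs above: the cheapest E -> F route
    # is E -> G -> F with total cost 3 (E -> B -> C -> D -> F would cost 4).
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # prints 3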
| 218 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Return the expected (height, width) after resizing, mirroring the
        image processor's shortest-edge logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings: one with full preprocessing, one that only pads
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
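
# A usage sketch, not part of the original test file: it allows running this
# module directly with unittest. The pytest path in the comment assumes the
# usual transformers repo layout and may differ in your checkout:
#
#     python -m pytest tests/models/yolos/test_image_processing_yolos.py
if __name__ == "__main__":
    unittest.main()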
| 218 | 1 |